rrighart committed
Commit 6d4f35c
Parent: a5af696

changes app

Files changed (2)
  1. __pycache__/app.cpython-38.pyc +0 -0
  2. app.py +32 -14
__pycache__/app.cpython-38.pyc CHANGED
Binary files a/__pycache__/app.cpython-38.pyc and b/__pycache__/app.cpython-38.pyc differ
 
app.py CHANGED
@@ -1,20 +1,38 @@
 import gradio as gr
 import os
+import torch

+def update_value(val):
+    return f'Value is set to {val}'

-def image_mod(image):
-    return image.rotate(45)
+def yolov7_inference(
+    image: gr.Image = None,
+    conf_threshold: gr.Slider = 0.20,
+):

-demo = gr.Interface(
-    image_mod,
-    gr.Image(type="pil"),
-    "image",
-    flagging_options=["blurry", "incorrect", "other"],
-    examples=[
-        os.path.join(os.path.dirname(__file__), "example1.JPG"),
-        os.path.join(os.path.dirname(__file__), "example2.JPG"),
-        os.path.join(os.path.dirname(__file__), "example3.JPG"),
-    ],
-)
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+    path = 'y7-prdef.pt'
+    model = torch.hub.load("WongKinYiu/yolov7", "custom", f"{path}")
+    model.conf = conf_threshold
+    results = model([image], size=640)
+    return results.render()[0]

-demo.launch()
+inputs = [
+    gr.Image(label="input image"),
+    gr.Slider(minimum=0, maximum=1, step=0.1, label='Value'),
+]
+
+outputs = [
+    gr.Image(label="output image"),
+]
+
+gr.Interface(
+    fn=yolov7_inference,
+    inputs=inputs,
+    outputs=outputs,
+    title="- The detection of jar lid defects using Yolov7 -",
+    description="contact: [email protected]",
+
+    examples=[["example1.JPG"], ["example2.JPG"], ["example3.JPG"]],
+
+).launch(debug=True)
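
The rewritten app.py loads the custom y7-prdef.pt weights through torch.hub, runs YOLOv7 inference on the uploaded image at the chosen confidence threshold, and returns the rendered detections through a Gradio interface. For reference, a minimal sketch of the same inference path outside Gradio, assuming y7-prdef.pt and example1.JPG are present in the working directory (the output filename is only illustrative):

import torch
from PIL import Image

# Load the custom YOLOv7 weights via the WongKinYiu/yolov7 hub entry point,
# the same call the app makes.
model = torch.hub.load("WongKinYiu/yolov7", "custom", "y7-prdef.pt")
model.conf = 0.20  # confidence threshold; mirrors the app's default

# Run inference at 640 px and save the image with detections drawn on it.
results = model([Image.open("example1.JPG")], size=640)
Image.fromarray(results.render()[0]).save("example1_annotated.JPG")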