mennamostafa55555 committed on
Commit e9b261e
1 Parent(s): 45125dd

Update app.py

Files changed (1)
  1. app.py +83 -10
app.py CHANGED
@@ -47,7 +47,7 @@ def yolov8_inference(


    return annotated_image
-
+ '''
image_input = gr.inputs.Image()  # Adjust the shape according to your requirements

inputs = [
@@ -60,17 +60,90 @@ inputs = [

outputs = gr.Image(type="filepath", label="Output Image")
title = "OCR Demo"
+ '''
examples = [
    ["ocr1.jpg", 0.6, 0.45],
    ["ocr2.jpg", 0.25, 0.45],
    ["ocr3.jpg", 0.25, 0.45],
]
- demo_app = gr.Interface(examples=examples,
-                         fn=yolov8_inference,
-                         inputs=inputs,
-                         outputs=outputs,
-                         title=title,
-                         cache_examples=True,
-                         theme="default",
-                         )
- demo_app.launch(debug=False, enable_queue=True)
+ outputs_images = [
+     ["1.jpg"],  # output image for the first receipt example
+     ["2.jpg"],  # output image for the second receipt example
+     ["3.jpg"],  # output image for the third receipt example
+ ]
+
+ readme_html = """
+ <html>
+ <head>
+ <style>
+ .description {
+     margin: 20px;
+     padding: 10px;
+     border: 1px solid #ccc;
+ }
+ </style>
+ </head>
+ <body>
+ <div class="description">
+ <p><strong>More details:</strong></p>
+ <p>We present a demo for performing object segmentation using a model trained on the OCR-Receipt dataset. The model was trained on 54 images and validated on 15 images.</p>
+ <p><strong>Usage:</strong></p>
+ <p>You can upload receipt images, and the demo will return the segmented image.</p>
+ <p><strong>Dataset:</strong></p>
+ <p>The dataset comprises a total of 77 images, divided into three distinct sets:</p>
+ <ul>
+ <li><strong>Training Set:</strong> 54 images, used to train the model.</li>
+ <li><strong>Validation Set:</strong> 15 images, used to tune model parameters during development.</li>
+ <li><strong>Test Set:</strong> 8 images, held out as a separate evaluation set to assess the performance of trained models.</li>
+ </ul>
+ <p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
+ <p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/study-0w9zw/ocr-receipt" target="_blank">Dataset Download</a></p>
+ </div>
+ </body>
+ </html>
+ """
+ with gr.Blocks() as demo:
+     gr.Markdown(
+         """
+         <div style="text-align: center;">
+         <h1>OCR Demo</h1>
+         Powered by <a href="https://Tuba.ai">Tuba</a>
+         </div>
+         """
+     )
+
+     # Define the input component and add it to the layout
+     with gr.Row():
+         image_input = gr.inputs.Image()
+
+     # Define the output component
+     outputs = gr.Image(type="filepath", label="Output Image")
+
+     # Threshold sliders
+     with gr.Row():
+         conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
+     with gr.Row():
+         IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
+
+     button = gr.Button("Run")
+
+     # Define the event listener that connects the input and output components and triggers the function
+     button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, IOU_Slider], outputs=outputs, api_name="yolov8_inference")
+
+     gr.Examples(
+         fn=yolov8_inference,
+         examples=examples,
+         inputs=[image_input, conf_slider, IOU_Slider],
+         outputs=[outputs],
+     )
+     # gr.Examples(inputs=examples, outputs=outputs_images)
+     # Add the description below the layout
+     gr.Markdown(readme_html)
+ # Launch the app
+ demo.launch(share=False)
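
Note that this commit only rewires the UI around yolov8_inference; the function body itself is unchanged and not visible in the diff. For context, a minimal sketch of what such a function typically looks like with the Ultralytics YOLOv8 API follows. The checkpoint name "best.pt" and the output path are illustrative assumptions, not taken from this commit:

import cv2
from ultralytics import YOLO

model = YOLO("best.pt")  # assumed checkpoint path; the real one is defined elsewhere in app.py

def yolov8_inference(image, conf_threshold=0.25, iou_threshold=0.45):
    # Run segmentation with the thresholds chosen via the UI sliders
    results = model.predict(source=image, conf=conf_threshold, iou=iou_threshold)
    # Render the predicted masks/boxes onto a copy of the input image
    annotated_image = results[0].plot()
    # The output component is gr.Image(type="filepath"), so return a path to the rendered image
    out_path = "annotated.jpg"
    cv2.imwrite(out_path, annotated_image)
    return out_path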
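Because the click handler sets api_name="yolov8_inference", the launched Space also exposes the function as a named API endpoint. A sketch of calling it with gradio_client, where the Space URL is a placeholder and the call format assumes an older client that accepts a plain file path for image inputs (newer versions expect gradio_client.handle_file):

from gradio_client import Client

client = Client("https://<user>-<space>.hf.space")  # placeholder Space URL
result = client.predict(
    "ocr1.jpg",  # input image (one of the bundled examples)
    0.25,        # confidence threshold
    0.45,        # IOU threshold
    api_name="/yolov8_inference",
)
print(result)  # local path of the annotated output image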