import gradio as gr
import cv2
import easyocr
from ultralytics import YOLO

# Load the trained YOLO detection weights
model_path = "best.pt"
loaded_model = YOLO(model_path)

# Create the EasyOCR reader once for English text; re-creating it on every
# request is slow because it reloads the recognition model each time
reader = easyocr.Reader(['en'])
def image_classifier(img):
    # Run detection on the input image with a 0.25 confidence threshold
    res = loaded_model.predict(img, conf=0.25)

    # Guard against images where the detector finds nothing
    if len(res[0].boxes) == 0:
        return "No object detected"

    # Take the first detected box, given as (centre x, centre y, width, height),
    # and convert it to corner coordinates
    box = res[0].boxes.xywh[0]
    bounding_box = box.cpu().numpy()
    x0 = bounding_box[0] - bounding_box[2] / 2
    x1 = bounding_box[0] + bounding_box[2] / 2
    y0 = bounding_box[1] - bounding_box[3] / 2
    y1 = bounding_box[1] + bounding_box[3] / 2

    # Draw the detected box on the image
    start_point = (int(x0), int(y0))
    end_point = (int(x1), int(y1))
    cv2.rectangle(img, start_point, end_point, color=(0, 255, 0), thickness=2)

    # Run OCR on the image, restricted to digit characters
    result = reader.readtext(img, allowlist="0123456789")

    # EasyOCR returns (bbox, text, confidence) tuples; keep the text and its box
    text_and_coordinates = [(entry[1], entry[0]) for entry in result]
    return text_and_coordinates

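# Hedged sketch (not wired into the interface below): OCR often works better on
# just the detected region than on the full frame. The xyxy accessor comes from
# the Ultralytics Results API; the digit allowlist mirrors the function above,
# and this helper is an illustrative addition, not part of the original app.
def image_classifier_cropped(img):
    res = loaded_model.predict(img, conf=0.25)
    if len(res[0].boxes) == 0:
        return "No object detected"
    # Corner coordinates of the first detection, clamped to the image bounds
    x0, y0, x1, y1 = res[0].boxes.xyxy[0].cpu().numpy().astype(int)
    crop = img[max(y0, 0):y1, max(x0, 0):x1]
    # Read only the cropped region, restricted to digits
    result = reader.readtext(crop, allowlist="0123456789")
    return [(entry[1], entry[0]) for entry in result]
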
# Define the Gradio interface: numpy image in, recognised text out
demo = gr.Interface(
    fn=image_classifier,
    inputs=gr.Image(type="numpy"),
    outputs=gr.Textbox())

# Launch the Gradio interface (share=True creates a public link)
demo.launch(debug=True, share=True)