SUBHROJM committed on
Commit 9bbc6a1
1 Parent(s): b72ad59

Create app.py

Files changed (1)
app.py +44 -0
app.py ADDED
@@ -0,0 +1,44 @@
+ import gradio as gr
+ import cv2
+ import easyocr
+ from ultralytics import YOLO
+
+ # Load the trained YOLO detector and the EasyOCR reader once at startup
+ model = YOLO('/content/drive/MyDrive/SIH_2023/YOLO/runs/detect/train/weights/best.pt')
+ reader = easyocr.Reader(['en'])
+
+ def image_classifier(img):
+     # Detect the region of interest with YOLO (assumes at least one detection)
+     res = model.predict(img, conf=0.25)
+
+     # Convert the first box from centre/width/height (xywh) to corner coordinates
+     bounding_box = res[0].boxes.xywh[0].cpu().numpy()
+     x0 = bounding_box[0] - bounding_box[2] / 2
+     x1 = bounding_box[0] + bounding_box[2] / 2
+     y0 = bounding_box[1] - bounding_box[3] / 2
+     y1 = bounding_box[1] + bounding_box[3] / 2
+
+     # Draw the detection on the input image
+     start_point = (int(x0), int(y0))
+     end_point = (int(x1), int(y1))
+     cv2.rectangle(img, start_point, end_point, color=(0, 255, 0), thickness=2)
+
+     # Perform OCR on the input image, keeping digits only
+     result = reader.readtext(img, allowlist="0123456789")
+
+     # Extract text and bounding box coordinates
+     text_and_coordinates = [(entry[1], entry[0]) for entry in result]
+     return text_and_coordinates
+
+ # Define the Gradio Interface
+ iface = gr.Interface(
+     fn=image_classifier,
+     inputs="image",
+     outputs="text",
+     live=True,
+     title="YOLO Detection + OCR",
+     description="Upload an image; the detected region is outlined and the digits are read with EasyOCR.",
+ )
+
+ # Launch the Gradio Interface
+ iface.launch()
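
As a quick sanity check of the OCR step outside the Gradio UI, the snippet below runs EasyOCR directly and unpacks its output the same way image_classifier does. It is only a sketch: "sample.jpg" is a hypothetical test image, and it relies on the documented fact that readtext() returns (box_points, text, confidence) tuples, which the app reshapes into (text, box_points) pairs.

# Standalone check of the OCR step used in app.py.
# "sample.jpg" is a hypothetical test image; EasyOCR downloads its
# detection and recognition models on first use.
import cv2
import easyocr

reader = easyocr.Reader(['en'])
img = cv2.imread("sample.jpg")

# readtext() yields (box_points, text, confidence) tuples; keep digits only,
# then reshape each entry into (text, box_points) as image_classifier does.
result = reader.readtext(img, allowlist="0123456789")
text_and_coordinates = [(entry[1], entry[0]) for entry in result]

for text, box in text_and_coordinates:
    print(f"read '{text}' at corners {box}")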