SalmanML commited on
Commit
582a7fc
·
verified ·
1 Parent(s): 831a314

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -17
app.py CHANGED
@@ -1,22 +1,74 @@
1
- from flair.data import Sentence
2
- from flair.models import SequenceTagger
3
- import streamlit as st
4
 
5
- # load tagger
6
- tagger = SequenceTagger.load("flair/ner-english-large")
7
 
8
- # make example sentence
9
- text=st.text_area("Enter the text to detect it's named entities")
10
- sentence = Sentence(text)
11
 
12
- # predict NER tags
13
- tagger.predict(sentence)
14
 
15
- # print sentence
16
- print(sentence)
17
 
18
- # print predicted NER spans
19
- print('The following NER tags are found:')
20
- # iterate over entities and printx
21
- for entity in sentence.get_spans('ner'):
22
- print(entity)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from flair.data import Sentence
2
+ # from flair.models import SequenceTagger
3
+ # import streamlit as st
4
 
5
+ # # load tagger
6
+ # tagger = SequenceTagger.load("flair/ner-english-large")
7
 
8
+ # # make example sentence
9
+ # text=st.text_area("Enter the text to detect it's named entities")
10
+ # sentence = Sentence(text)
11
 
12
+ # # predict NER tags
13
+ # tagger.predict(sentence)
14
 
15
+ # # print sentence
16
+ # print(sentence)
17
 
18
+ # # print predicted NER spans
19
+ # print('The following NER tags are found:')
20
+ # # iterate over entities and printx
21
+ # for entity in sentence.get_spans('ner'):
22
+ # print(entity)
23
+
24
+
25
+
26
import re

import cv2
import easyocr
import numpy as np
import requests
import streamlit as st
from PIL import Image
31
+
32
+ ## Image uploading function ##
33
def image_upload_and_ocr(reader):
    """Let the user upload a business-card image and run OCR on it.

    Parameters
    ----------
    reader : easyocr.Reader
        An initialised EasyOCR reader used to extract the text.

    Returns
    -------
    tuple
        ``(joined_text, raw_detections)`` once a file is uploaded, or
        ``(None, None)`` while no file has been chosen yet — the original
        implicitly returned a bare ``None`` in that case, which made the
        caller's tuple unpacking raise ``TypeError``.
    """
    uploaded_file = st.file_uploader(
        # Fixed user-facing typo: "busines" -> "business".
        label=':red[**please upload a business card** :sunglasses:]',
        type=['jpeg', 'jpg', 'png', 'webp'],
    )
    if uploaded_file is None:
        # Safe, unpackable sentinel instead of falling off the end.
        return None, None

    image = Image.open(uploaded_file)
    # Normalise the size so OCR runtime/accuracy is predictable.
    image = image.resize((640, 480))
    detections = reader.readtext(image)
    # Each detection is (bounding_box, text, confidence); keep only the text.
    joined_text = ' '.join(item[1] for item in detections)
    return joined_text, detections
43
+
44
+
45
+
46
+ ### DRAWING DETECTION FUNCTION ###
47
def drawing_detection(image, canvas=None):
    """Draw bounding boxes and recognised text for each OCR detection.

    Parameters
    ----------
    image : list | None
        Despite the name (kept for backward compatibility), this is the raw
        EasyOCR detection list: ``[(points, text, confidence), ...]``.  The
        original code passed this list straight to ``cv2.rectangle`` /
        ``cv2.putText``, which require an image array and would raise at
        runtime.
    canvas : numpy.ndarray, optional
        Image to draw on.  When omitted, a white 640x480 canvas is created
        (matching the resize done in ``image_upload_and_ocr``).

    Returns
    -------
    numpy.ndarray | None
        The annotated image, or ``None`` when there are no detections.
    """
    if not image:
        # Nothing uploaded / nothing detected: nothing to draw.
        return None
    if canvas is None:
        # White background sized like the resized upload (H=480, W=640).
        canvas = np.full((480, 640, 3), 255, dtype=np.uint8)

    for detection in image:
        # Bounding box is a list of four corner points.
        points = detection[0]
        x1, y1 = int(points[0][0]), int(points[0][1])  # top-left corner
        x2, y2 = int(points[2][0]), int(points[2][1])  # bottom-right corner

        # Draw the bounding box on the canvas, not on the detection list.
        cv2.rectangle(canvas, (x1, y1), (x2, y2), (0, 255, 0), 2)

        # Put the recognised text just above the box.
        text = detection[1]
        cv2.putText(canvas, text, (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

    st.image(canvas, caption='Detected text on the card ', width=710)
    return canvas
63
+
64
# Load the EasyOCR reader once at startup (model load is expensive, so do
# it outside any per-upload code path).
reader = easyocr.Reader(['en'])

st.title("_Business_ card data extractor using opencv and streamlit :sunglasses:")

# upload is (joined_text, raw_detections) once a card is uploaded; guard
# against the no-upload case so tuple unpacking cannot crash the app.
upload = image_upload_and_ocr(reader)
if upload is not None and upload[1] is not None:
    result, result2 = upload
    # Fixed typo: 'darwing_image' -> 'drawing_image'.
    drawing_image = drawing_detection(result2)
70
+
71
+
72
+
73
+
74
+