Spaces: Runtime error

Upload 4 files
- Dockerfile +10 -0
- app.py +15 -0
- llm.py +22 -0
- requirements.txt +11 -0
Dockerfile
ADDED
@@ -0,0 +1,10 @@
FROM tesseractshadow/tesseract4re

WORKDIR /code

COPY . /code

RUN pip install -r requirements.txt

# Streamlit has no --host flag; --server.address binds the server to all interfaces
CMD ["streamlit", "run", "app.py", "--server.address", "0.0.0.0", "--server.port", "7860"]
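For local testing, the image can be built and run roughly as follows; the nutrition-ocr tag is only an illustrative name, and PALM_API_KEY is the environment variable llm.py is assumed to read:

docker build -t nutrition-ocr .
docker run -p 7860:7860 -e PALM_API_KEY=your_key nutrition-ocr

The app should then be reachable at http://localhost:7860.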
app.py
ADDED
@@ -0,0 +1,15 @@
import streamlit as st
import cv2
from llm import llm
import numpy as np

image = st.file_uploader("Choose a clear image of the Nutrition Facts", type=["jpg", "jpeg", "png"])

# Display the uploaded image
if image is not None:
    # Read the uploaded bytes into a NumPy buffer
    buffer = np.asarray(bytearray(image.read()), dtype=np.uint8)

    # Decode the image with OpenCV; convert BGR to RGB so Streamlit shows correct colors
    img = cv2.imdecode(buffer, cv2.IMREAD_COLOR)
    st.image(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), caption="Uploaded Image.", use_column_width=True)

    st.markdown(llm(img))
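Outside Docker, the app can also be started directly with Streamlit, assuming the packages from requirements.txt are installed and the Tesseract binary is available on the system (pytesseract only wraps the tesseract executable):

streamlit run app.py --server.port 7860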
llm.py
ADDED
@@ -0,0 +1,22 @@
import os

import pytesseract
import google.generativeai as palm

# Read the API key from the environment instead of hardcoding it in the repository
api_key = os.environ.get("PALM_API_KEY")
palm.configure(api_key=api_key)

models = [m for m in palm.list_models() if 'generateText' in m.supported_generation_methods]
model = models[0].name


def llm(img):
    # Extract the label text from the image with Tesseract OCR
    text = pytesseract.image_to_string(img, lang='eng')

    # Ask the model to reformat the OCR output and add recommendations
    prompt = (
        "Take this piece of information and present all of it point-wise in a better format, "
        "and also give some recommendations related to it. If you don't find any nutrition "
        "content, simply reply 'I don't seem to have any knowledge of the particular "
        "Nutrition Content.' "
    ) + text

    response = palm.generate_text(
        prompt=prompt,
        model=model,
        temperature=0.5,
        max_output_tokens=2000,
        top_p=0.9,
        top_k=40,
    )
    return response.result
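A minimal sketch for exercising llm() outside Streamlit, assuming the API key is configured and label.jpg is a placeholder path to any photo of a nutrition label:

import cv2
from llm import llm

# label.jpg is a placeholder; use any local image of a nutrition label
img = cv2.imread("label.jpg")
print(llm(img))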
requirements.txt
ADDED
@@ -0,0 +1,11 @@
# Core Libraries
# A Python 3.11+ interpreter is assumed (Python itself is not a pip-installable package)
numpy>=1.21.0
scipy>=1.7.3
pytesseract
opencv-python-headless   # provides cv2 for app.py; the headless build avoids GUI dependencies
google-generativeai      # used by llm.py (imported as palm)

# Web Application Framework
streamlit>=1.28.0

# Package Management
pip==23.3.2