Garvitj committed
Commit 339ebaf · verified · 1 Parent(s): 33e8652

Update app.py

Files changed (1):
  1. app.py +170 -46
app.py CHANGED
@@ -1,4 +1,119 @@
  import gradio as gr
  from transformers import pipeline
  import pytesseract
@@ -10,32 +125,40 @@ import requests
  # Initialize sentence transformer model
  model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

- # Hugging Face API details
- API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
- headers = {"Authorization": f"Bearer {hf_TsCTtXxnvpmhFKABqKmcVLyLEhjQPsITSVx}"}
-
- # Function to interact with Hugging Face API for GPT-2
- def query(payload):
-     response = requests.post(API_URL, headers=headers, json=payload)
-     return response.json()
-
- # Function to generate text response from GPT-2 model using Hugging Face API
- def generate_response(prompt):
-     response = query({"inputs": prompt})
-
-     # Check if the response contains the expected format
-     if isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
-         return response[0]['generated_text']
-     else:
-         # Log the response if something unexpected is returned
-         print("Unexpected response format:", response)
-         return "Sorry, I couldn't generate a response."
-
- # Function to generate text response from GPT-2 model using Hugging Face API
- # def generate_response(prompt):
- #     response = query({"inputs": prompt})
- #     return response[0]['generated_text']

  # Extract text from an image using Tesseract
  def extract_text_from_image(filepath: str, languages: List[str]):
@@ -74,37 +197,38 @@ def evaluate_answer(image, languages):
      similarity_score = calculate_similarity(student_answer, model_answer)
      grade = get_grade(similarity_score)
      feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
-     prompt=f"the student got grades: {grade} when Student's answer is: {student_answer} and Teacher's answer is: {model_answer}. justify the grades given to student"
      return grade, similarity_score * 100, feedback, prompt

  # Main interface function for Gradio
- def gradio_interface(image, languages: List[str], prompt=""):
-     grade, similarity_score, feedback,prompt = evaluate_answer(image, languages)
-     response = generate_response(prompt)
      return grade, similarity_score, feedback, response

  # Get available Tesseract languages
  language_choices = pytesseract.get_languages()

  # Define Gradio interface
- interface = gr.Interface(
-     fn=gradio_interface,
-     inputs=[
-         gr.Image(type="filepath", label="Input"),
-         gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='language'),
-         gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
-     ],
-     outputs=[
-         gr.Text(label="Grade"),
-         gr.Number(label="Similarity Score (%)"),
-         gr.Text(label="Feedback"),
-         gr.Text(label="Generated Response")
-     ],
-     title="Automated Grading System",
-     description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, and feedback based on the model answer.",
-     live=True
- )

  if __name__ == "__main__":
-     interface.launch()
-
+ # import gradio as gr
+ # from transformers import pipeline
+ # import pytesseract
+ # from sentence_transformers import SentenceTransformer, util
+ # from PIL import Image
+ # from typing import List
+ # import requests
+
+ # # Initialize sentence transformer model
+ # model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+
+ # # Hugging Face API details
+ # API_URL = "https://api-inference.huggingface.co/models/openai-community/gpt2"
+ # headers = {"Authorization": f"Bearer {hf_TsCTtXxnvpmhFKABqKmcVLyLEhjQPsITSVx}"}
+
+ # # Function to interact with Hugging Face API for GPT-2
+ # def query(payload):
+ #     response = requests.post(API_URL, headers=headers, json=payload)
+ #     return response.json()
+
+ # # Function to generate text response from GPT-2 model using Hugging Face API
+ # def generate_response(prompt):
+ #     response = query({"inputs": prompt})
+
+ #     # Check if the response contains the expected format
+ #     if isinstance(response, list) and len(response) > 0 and 'generated_text' in response[0]:
+ #         return response[0]['generated_text']
+ #     else:
+ #         # Log the response if something unexpected is returned
+ #         print("Unexpected response format:", response)
+ #         return "Sorry, I couldn't generate a response."
+
+
+ # # Function to generate text response from GPT-2 model using Hugging Face API
+ # # def generate_response(prompt):
+ # #     response = query({"inputs": prompt})
+ # #     return response[0]['generated_text']
+
+ # # Extract text from an image using Tesseract
+ # def extract_text_from_image(filepath: str, languages: List[str]):
+ #     image = Image.open(filepath)
+ #     lang_str = '+'.join(languages) # Join languages for Tesseract
+ #     return pytesseract.image_to_string(image=image, lang=lang_str)
+
+ # # Function to get embeddings for text using SentenceTransformer
+ # def get_embedding(text):
+ #     return model1.encode(text, convert_to_tensor=True)
+
+ # # Calculate similarity between two texts using cosine similarity
+ # def calculate_similarity(text1, text2):
+ #     embedding1 = get_embedding(text1)
+ #     embedding2 = get_embedding(text2)
+ #     similarity = util.pytorch_cos_sim(embedding1, embedding2)
+ #     return similarity.item()
+
+ # # Assign grades based on similarity score
+ # def get_grade(similarity_score):
+ #     if similarity_score >= 0.9:
+ #         return 5
+ #     elif similarity_score >= 0.8:
+ #         return 4
+ #     elif similarity_score >= 0.7:
+ #         return 3
+ #     elif similarity_score >= 0.6:
+ #         return 2
+ #     else:
+ #         return 1
+
+ # # Function to evaluate student's answer by comparing it to a model answer
+ # def evaluate_answer(image, languages):
+ #     student_answer = extract_text_from_image(image, languages)
+ #     model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
+ #     similarity_score = calculate_similarity(student_answer, model_answer)
+ #     grade = get_grade(similarity_score)
+ #     feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
+ #     prompt=f"the student got grades: {grade} when Student's answer is: {student_answer} and Teacher's answer is: {model_answer}. justify the grades given to student"
+ #     return grade, similarity_score * 100, feedback, prompt
+
+ # # Main interface function for Gradio
+ # def gradio_interface(image, languages: List[str], prompt=""):
+ #     grade, similarity_score, feedback,prompt = evaluate_answer(image, languages)
+ #     response = generate_response(prompt)
+ #     return grade, similarity_score, feedback, response
+
+ # # Get available Tesseract languages
+ # language_choices = pytesseract.get_languages()
+
+ # # Define Gradio interface
+ # interface = gr.Interface(
+ #     fn=gradio_interface,
+ #     inputs=[
+ #         gr.Image(type="filepath", label="Input"),
+ #         gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='language'),
+ #         gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
+ #     ],
+ #     outputs=[
+ #         gr.Text(label="Grade"),
+ #         gr.Number(label="Similarity Score (%)"),
+ #         gr.Text(label="Feedback"),
+ #         gr.Text(label="Generated Response")
+ #     ],
+ #     title="Automated Grading System",
+ #     description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, and feedback based on the model answer.",
+ #     live=True
+ # )
+
+ # if __name__ == "__main__":
+ #     interface.launch()
+
+
+
+
+
+ import os
+ from groq import Groq
  import gradio as gr
  from transformers import pipeline
  import pytesseract
 
  # Initialize sentence transformer model
  model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

+ # Initialize Groq client
+ client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

+ # System prompt for Groq
+ system_prompt = {
+     "role": "system",
+     "content": "You are a useful assistant. You reply with efficient answers."
+ }

+ # Function to interact with Groq for generating response
+ async def chat_groq(message, history):
+     messages = [system_prompt]
+
+     for msg in history:
+         messages.append({"role": "user", "content": str(msg[0])})
+         messages.append({"role": "assistant", "content": str(msg[1])})
+
+     messages.append({"role": "user", "content": str(message)})
+
+     response_content = ''
+
+     stream = client.chat.completions.create(
+         model="llama3-70b-8192",
+         messages=messages,
+         max_tokens=1024,
+         temperature=1.3,
+         stream=True
+     )
+
+     for chunk in stream:
+         content = chunk.choices[0].delta.content
+         if content:
+             response_content += chunk.choices[0].delta.content
+             yield response_content

  # Extract text from an image using Tesseract
  def extract_text_from_image(filepath: str, languages: List[str]):
 
      similarity_score = calculate_similarity(student_answer, model_answer)
      grade = get_grade(similarity_score)
      feedback = f"Student's answer: {student_answer}\nTeacher's answer: {model_answer}"
+     prompt = f"The student got grade: {grade} when the student's answer is: {student_answer} and the teacher's answer is: {model_answer}. Justify the grade given to the student."
      return grade, similarity_score * 100, feedback, prompt

  # Main interface function for Gradio
+ async def gradio_interface(image, languages: List[str], prompt="", history=[]):
+     grade, similarity_score, feedback, prompt = evaluate_answer(image, languages)
+     response = ""
+     async for result in chat_groq(prompt, history):
+         response = result  # Get the Groq response
      return grade, similarity_score, feedback, response

  # Get available Tesseract languages
  language_choices = pytesseract.get_languages()

  # Define Gradio interface
+ with gr.Blocks(theme=gr.themes.Monochrome(), fill_height=True) as demo:
+     interface = gr.ChatInterface(gradio_interface,
+         inputs=[
+             gr.Image(type="filepath", label="Input"),
+             gr.CheckboxGroup(language_choices, type="value", value=['eng'], label='Language'),
+             gr.Textbox(lines=2, placeholder="Enter your prompt here", label="Prompt")
+         ],
+         outputs=[
+             gr.Text(label="Grade"),
+             gr.Number(label="Similarity Score (%)"),
+             gr.Text(label="Feedback"),
+             gr.Text(label="Generated Response")
+         ],
+         title="Automated Grading System",
+         description="Upload an image of your answer sheet to get a grade from 1 to 5, similarity score, and feedback based on the model answer.",
+         live=True)

  if __name__ == "__main__":
+     demo.queue()
+     demo.launch()
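
For reference, a minimal sketch (not part of the commit) of the Groq streaming call that the new chat_groq function relies on, run outside Gradio. It assumes the groq package is installed and GROQ_API_KEY is set in the environment, and it substitutes a hypothetical placeholder prompt for the one built by evaluate_answer; the model name, system prompt, and sampling parameters are copied from the updated app.py.

    # Sketch only: exercise the Groq streaming completion used in app.py outside Gradio.
    # Assumes `pip install groq` and GROQ_API_KEY in the environment.
    import os

    from groq import Groq

    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

    # Hypothetical stand-in for the prompt that evaluate_answer() builds.
    prompt = (
        "The student got grade: 4 when the student's answer is: <OCR text> "
        "and the teacher's answer is: <model answer>. Justify the grade given to the student."
    )

    stream = client.chat.completions.create(
        model="llama3-70b-8192",
        messages=[
            {"role": "system", "content": "You are a useful assistant. You reply with efficient answers."},
            {"role": "user", "content": prompt},
        ],
        max_tokens=1024,
        temperature=1.3,
        stream=True,
    )

    # Accumulate streamed deltas the same way chat_groq() does.
    response_content = ""
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            response_content += content

    print(response_content)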
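
Similarly, a small sketch (again, not part of the commit) for sanity-checking the unchanged similarity-to-grade mapping without OCR or the web UI. It reuses the sentence-transformers model name, cosine-similarity call, and grade thresholds from app.py; the student_answer string is a hypothetical example standing in for Tesseract output.

    # Sketch only: check the grading thresholds from app.py on a sample answer pair.
    from sentence_transformers import SentenceTransformer, util

    model1 = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

    def calculate_similarity(text1, text2):
        # Cosine similarity between sentence embeddings, as in app.py
        embedding1 = model1.encode(text1, convert_to_tensor=True)
        embedding2 = model1.encode(text2, convert_to_tensor=True)
        return util.pytorch_cos_sim(embedding1, embedding2).item()

    def get_grade(similarity_score):
        # Same 0.9/0.8/0.7/0.6 cutoffs as the app
        if similarity_score >= 0.9:
            return 5
        elif similarity_score >= 0.8:
            return 4
        elif similarity_score >= 0.7:
            return 3
        elif similarity_score >= 0.6:
            return 2
        return 1

    model_answer = "The process of photosynthesis helps plants produce glucose using sunlight."
    student_answer = "Plants use sunlight to make glucose through photosynthesis."  # hypothetical OCR output

    score = calculate_similarity(student_answer, model_answer)
    print(f"similarity={score:.3f}, grade={get_grade(score)}")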