Gampanut committed on
Commit 11700c3 · verified · 1 Parent(s): 8ece733

Update app.py

Files changed (1): app.py +62 -8
app.py CHANGED
@@ -18,6 +18,8 @@ import gspread
 from oauth2client.service_account import ServiceAccountCredentials
 from typing import Optional, List, Dict, Any
 from pydantic import BaseModel, Field
+import time
+import tracemalloc
 
 # Custom Configuration for Pydantic
 class Config:
@@ -164,16 +166,52 @@ graphrag_chain = GraphQAChain.from_llm(
     verbose=True
 )
 
+# Define functions to measure memory and time
+def measure_memory_and_time(func):
+    def wrapper(*args, **kwargs):
+        tracemalloc.start()  # Start memory tracking
+        start_time = time.time()  # Start time tracking
+
+        result = func(*args, **kwargs)
+
+        current, peak = tracemalloc.get_traced_memory()  # Get memory usage
+        tracemalloc.stop()
+
+        end_time = time.time()  # End time tracking
+        elapsed_time = end_time - start_time  # Calculate elapsed time
+
+        return result, elapsed_time, peak / 1024  # Convert memory to KB
+    return wrapper
+
+# Modified functions to use memory and time measurement
+@measure_memory_and_time
+def get_rag_response(question: str) -> str:
+    context = get_context(question)
+    response = rag_llm.predict(prompt.format(context=context, question=question))
+    return response
+
+@measure_memory_and_time
 def get_graphrag_response(question: str) -> str:
     system_prompt = "You are a Thai rice assistant that gives concise and direct answers. Do not explain the process, just provide the answer, provide the answer only in Thai."
     formatted_question = f"System Prompt: {system_prompt}\n\nQuestion: {question}"
     response = graphrag_chain.run(formatted_question)
     return response
 
-def compare_models(question: str) -> tuple[str, str]:
-    rag_response = get_rag_response(question)
-    graphrag_response = get_graphrag_response(question)
-    return rag_response, graphrag_response
+# Modify compare_models to collect and display metrics
+def compare_models(question: str) -> dict:
+    rag_response, rag_time, rag_memory = get_rag_response(question)
+    graphrag_response, graphrag_time, graphrag_memory = get_graphrag_response(question)
+
+    # Combine responses with metrics
+    results = {
+        "RAG Response": rag_response,
+        "RAG Time (s)": round(rag_time, 2),
+        "RAG Memory (KB)": round(rag_memory, 2),
+        "GraphRAG Response": graphrag_response,
+        "GraphRAG Time (s)": round(graphrag_time, 2),
+        "GraphRAG Memory (KB)": round(graphrag_memory, 2),
+    }
+    return results
 
 # Gradio Interface
 with gr.Blocks() as demo:
@@ -181,16 +219,32 @@ with gr.Blocks() as demo:
     with gr.Column():
         question_input = gr.Textbox(label="ถามคำถามเกี่ยวกับข้าว:", placeholder="Enter your question about Thai rice")
        submit_btn = gr.Button(value="ถาม")
-        rag_output = gr.Textbox(label="Model A", interactive=False)
-        graphrag_output = gr.Textbox(label="Model B", interactive=False)
+        rag_output = gr.Textbox(label="RAG Response", interactive=False)
+        rag_time_output = gr.Textbox(label="RAG Time (s)", interactive=False)
+        rag_memory_output = gr.Textbox(label="RAG Memory (KB)", interactive=False)
+        graphrag_output = gr.Textbox(label="GraphRAG Response", interactive=False)
+        graphrag_time_output = gr.Textbox(label="GraphRAG Time (s)", interactive=False)
+        graphrag_memory_output = gr.Textbox(label="GraphRAG Memory (KB)", interactive=False)
         feedback = gr.Radio(label="Which response is better?", choices=["A ดีกว่า", "B ดีกว่า", "เท่ากัน", "แย่ทั้งคู่"])
         submit_feedback = gr.Button(value="Submit Feedback")
 
+    # Update Gradio app with time and memory results
+    def display_results(question):
+        results = compare_models(question)
+        return (
+            results["RAG Response"], results["RAG Time (s)"], results["RAG Memory (KB)"],
+            results["GraphRAG Response"], results["GraphRAG Time (s)"], results["GraphRAG Memory (KB)"]
+        )
+
     # Event handlers
+    submit_btn.click(
+        fn=display_results,
+        inputs=[question_input],
+        outputs=[rag_output, rag_time_output, rag_memory_output, graphrag_output, graphrag_time_output, graphrag_memory_output]
+    )
     submit_feedback.click(fn=add_review, inputs=[question_input, rag_output, graphrag_output, feedback])
     demo.load(fn=load_data, inputs=None)
 
 # Launch the app
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
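
The core of this change is the measure_memory_and_time decorator. A minimal standalone sketch of the same pattern is shown below; fake_model_call and its workload are placeholders for illustration only, not part of app.py:

import time
import tracemalloc


def measure_memory_and_time(func):
    # Wrap func so it returns (result, elapsed seconds, peak traced memory in KB).
    def wrapper(*args, **kwargs):
        tracemalloc.start()
        start_time = time.time()
        result = func(*args, **kwargs)
        _, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        elapsed_time = time.time() - start_time
        return result, elapsed_time, peak / 1024
    return wrapper


@measure_memory_and_time
def fake_model_call(question: str) -> str:
    # Placeholder workload standing in for an LLM call.
    return ("answer to: " + question) * 1000


if __name__ == "__main__":
    answer, seconds, peak_kb = fake_model_call("rice")
    print(f"time={seconds:.4f}s  peak={peak_kb:.1f} KB  answer_len={len(answer)}")

Note that tracemalloc only traces allocations made by the Python interpreter, so for calls that mostly wait on a remote LLM API the reported peak mainly reflects local prompt and response handling, while time.time() captures wall-clock latency including network time.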