rknl committed
Commit 748c045 · verified · 1 Parent(s): 9541538

Update app.py

Files changed (1)
  1. app.py +181 -4
app.py CHANGED
@@ -57,6 +57,53 @@ def query_tqa(query, search_level):
         # rag_reference_text,
     )

+
+# def eval_llm(query, rag_response, grag_response):
+#     """
+#     Evaluate the Graph-RAG and RAG responses using an LLM.
+
+#     Args:
+#         query (str): The query that was asked.
+#         rag_response (str): The response from the Vanilla-RAG model.
+#         grag_response (str): The response from the Graph-RAG model.
+
+#     Returns:
+#         str: The evaluation text on various criteria from the LLM.
+#     """
+
+#     if not query.strip() or not rag_response.strip() or not grag_response.strip():
+#         raise gr.Error("Please ask a query and get responses before evaluating.")
+
+#     eval_text = evaluate_llm(query, grag_response, rag_response)
+#     return eval_text
+
+
+# def reason_and_plot(query, grag_response, grag_reference):
+#     """
+#     Get the reasoning graph for a query and plot the knowledge graph.
+
+#     Args:
+#         query (str): The query to ask the Graph-RAG.
+#         grag_response (str): The response from the Graph-RAG model.
+#         grag_reference (str): The reference text from the Graph-RAG model.
+
+#     Returns:
+#         tuple: The reasoning graph and the HTML to plot the knowledge graph.
+#     """
+
+#     if not query.strip() or not grag_response.strip() or not grag_reference.strip():
+#         raise gr.Error(
+#             "Please ask a query and get a Graph-RAG response before reasoning."
+#         )
+
+#     graph_reasoning = reasoning_graph(query, grag_response, grag_reference)
+#     escaped_html = plot_subgraph(grag_reference)
+
+#     iframe_html = f'<iframe srcdoc="{escaped_html}" width="100%" height="400px" frameborder="0"></iframe>'
+
+#     return graph_reasoning, iframe_html
+
+
 def show_graph():
     """
     Show the latest graph visualization in an iframe.
@@ -81,9 +128,45 @@ def show_graph():
         return f"Error: {str(e)}"


+def reveal_coupon(query, grag_response):
+    """
+    Get the coupon from the query and response.
+
+    Args:
+        query (str): Query asked to Graph-RAG.
+        grag_response (str): Response from the Graph-RAG model.
+
+    Returns:
+        str: Coupon with reasoning.
+    """
+
+    if not query.strip() or not grag_response.strip():
+        raise gr.Error("Please ask a query and get a response before revealing the coupon.")
+
+    coupon = get_coupon(query, grag_response)
+    return coupon
+# Implementing Gradio 5 features and building a ChatInterface UI yourself
+PLACEHOLDER = """<div style="padding: 20px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+   <img src="https://ysharma-dummy-chat-app.hf.space/file=/tmp/gradio/c21ff9c8e7ecb2f7d957a72f2ef03c610ac7bbc4/Meta_lockup_positive%20primary_RGB_small.jpg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; margin-bottom: 10px;">
+   <h1 style="font-size: 28px; margin: 0;">Meta llama3.2</h1>
+   <p style="font-size: 18px; margin: 5px 0 0; opacity: 0.65;">
+      <a href="https://huggingface.co/blog/llama32" target="_blank" style="color: inherit; text-decoration: none;">Learn more about Llama 3.2</a>
+   </p>
+</div>"""
+
+
 with gr.Blocks() as demo:
     gr.Markdown("# Comfy Virtual Assistant")
-    chatbot = gr.Chatbot()
+    chatbot = gr.Chatbot(
+        label="Comfy Virtual Assistant",
+        type="messages",
+        scale=1,
+        suggestions = [
+            {"text": "How much iphone cost?"},
+            {"text": "What phone options do i have ?"}
+        ],
+        placeholder = PLACEHOLDER,
+    )
     msg = gr.Textbox(label="Input Your Query")
     clear = gr.ClearButton([msg, chatbot])

@@ -94,12 +177,106 @@ with gr.Blocks() as demo:
         return "", chat_history

     msg.submit(respond, [msg, chatbot], [msg, chatbot])
+
+    # with gr.Row():
+    #     with gr.Column(scale=4):
+    #         query_input = gr.Textbox(label="Input Your Query", lines=3)
+    #         # with gr.Column(scale=1):
+    #         #     search_level = gr.Slider(
+    #         #         minimum=1, maximum=50, value=3, step=5, label="Search Level"
+    #         #     )
+    #         ask_button = gr.Button("Ask Comfy", variant="primary")
+
+    # examples = gr.Examples(
+    #     examples=[
+    #         ["Recommend me an apple phone that has more than 10MP camera."],
+    #         ["What is the price of Samsung Galaxy S24 Ultra 12/256Gb Titanium Gray"],
+    #         ["I want a phone with 5000 mAH or more battery"],
+    #     ],
+    #     inputs=[query_input],
+    # )
+
+    # with gr.Row():
+    #     with gr.Column():
+    #         gr.Markdown("### Graph-RAG")
+    #         grag_output = gr.Textbox(label="Response", lines=5)
+    #         grag_reference = gr.Textbox(label="Triplets", lines=3)
+    #         with gr.Accordion("Extracted Reference (Raw)", open=False):
+    #             grag_reference_text = gr.Textbox(label="Raw Reference", lines=5)
+
+    #     with gr.Column():
+    #         gr.Markdown("### Vanilla RAG")
+    #         rag_output = gr.Textbox(label="Response", lines=5)
+    #         rag_reference = gr.Textbox(label="Extracted Reference", lines=3)
+    #         with gr.Accordion("Extracted Reference (Raw)", open=False):
+    #             rag_reference_text = gr.Textbox(label="Raw Reference", lines=5)
+
+    # gr.Markdown("### Coupon")
+    # with gr.Row():
+    #     with gr.Column():
+    #         coupon = gr.Text(label="Coupon", lines=1)
+    #     with gr.Column():
+    #         reveal = gr.Button("Reveal Coupon", variant="secondary")
+
+    # with gr.Row():
+    #     gr.Markdown("### Evaluate and Compare")
+
+    # with gr.Row():
+    #     eval_button = gr.Button("Evaluate LLMs", variant="secondary")
+
+    # grag_performance = gr.Textbox(label="Evaluation", lines=3)

+    # with gr.Row():
+    #     gr.Markdown("### Graph Reasoning")

-    with gr.Row():
-        plot_button = gr.Button("Plot Knowledge Graph", variant="secondary")
+    # with gr.Row():
+    #     reason_button = gr.Button("Get Graph Reasoning", variant="secondary")

-    kg_output = gr.HTML()
+    # with gr.Row():
+    #     with gr.Column():
+    #         grag_reasoning = gr.Textbox(label="Graph-RAG Reasoning", lines=5)
+    #     with gr.Column():
+    #         subgraph_plot = gr.HTML()

+    # with gr.Row():
+    #     plot_button = gr.Button("Plot Knowledge Graph", variant="secondary")
+
+    # kg_output = gr.HTML()
+
+    # ask_button.click(
+    #     query_tqa,
+    #     inputs=[query_input, search_level],
+    #     outputs=[
+    #         grag_output,
+    #         # grag_reference,
+    #         # grag_reference_text,
+    #         # rag_output,
+    #         # rag_reference,
+    #         # rag_reference_text,
+    #     ],
+    # )
+
+    # eval_button.click(
+    #     eval_llm,
+    #     inputs=[query_input, rag_output, grag_output],
+    #     outputs=[grag_performance],
+    # )
+
+    # reason_button.click(
+    #     reason_and_plot,
+    #     inputs=[query_input, grag_output, grag_reference],
+    #     outputs=[grag_reasoning, subgraph_plot],
+    # )
+
+    # plot_button.click(
+    #     show_graph,
+    #     outputs=[kg_output],
+    # )
+
+    # reveal.click(
+    #     reveal_coupon,
+    #     inputs=[query_input, grag_output],
+    #     outputs=[coupon],
+    # )

  demo.launch(auth=(os.getenv("ID"), os.getenv("PASS")), share=False)
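
Note on the new type="messages" Chatbot: with this setting, Gradio expects the chat history to be a list of {"role": ..., "content": ...} dictionaries rather than [user, bot] pairs, so the respond callback (only its tail is visible in this diff) has to append messages in that shape. The sketch below is a minimal, self-contained illustration of that pattern; the echoed answer merely stands in for the repo's actual query_tqa call and is not the app's real logic.

import gradio as gr

def respond(message, chat_history):
    # Stand-in for the app's Graph-RAG call (query_tqa in app.py); echoes the
    # query so this sketch runs on its own.
    answer = f"You asked: {message}"
    # With gr.Chatbot(type="messages"), history entries are role/content dicts,
    # not [user, bot] pairs.
    chat_history.append({"role": "user", "content": message})
    chat_history.append({"role": "assistant", "content": answer})
    return "", chat_history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Comfy Virtual Assistant", type="messages")
    msg = gr.Textbox(label="Input Your Query")
    clear = gr.ClearButton([msg, chatbot])
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()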