Update app.py
app.py CHANGED
@@ -1,10 +1,3 @@
1 | - # -*- coding: utf-8 -*-
2 | - """Chat_with_agent_v4.ipynb
3 | - Automatically generated by Colab.
4 | - Original file is located at
5 | - https://colab.research.google.com/drive/1T5Buj_yHaAnfoO__2-gCFDSvBVheiHrF
6 | - """
7 | -
8 |   from PIL import Image
9 |   import base64
10 |   from io import BytesIO
@@ -18,17 +11,17 @@ import time
18 |   import shutil
19 |   import json
20 |   import nltk
21 | - #audio package
22 |   import speech_recognition as sr
23 |   from pydub import AudioSegment
24 |   from pydub.playback import play
25 | - #email library
26 |   import smtplib
27 |   from email.mime.multipart import MIMEMultipart
28 |   from email.mime.text import MIMEText
29 |   from email.mime.base import MIMEBase
30 |   from email import encoders
31 | - #langchain
32 |   from langchain_core.prompts import ChatPromptTemplate
33 |   from langchain_core.output_parsers import StrOutputParser
34 |   from langchain_core.runnables import RunnableSequence, RunnableLambda
@@ -42,37 +35,69 @@ from langchain.tools import StructuredTool
42 |   from langchain.pydantic_v1 import BaseModel, Field
43 |   from PyPDF2 import PdfReader
44 |   from nltk.tokenize import sent_tokenize
45 |   from sqlalchemy import create_engine
46 |   from sqlalchemy.sql import text
47 |
48 | - #pandas
49 |   import pandas as pd
50 |   from pandasai.llm.openai import OpenAI
51 |   from pandasai import SmartDataframe
52 |
53 |
54 |
55 |
56 |   nltk.download('punkt')
57 |
58 | - open_api_key_token = os.
59 |
60 |   os.environ['OPENAI_API_KEY'] = open_api_key_token
61 | - pdf_path="Inbound.pdf"
62 |
63 | - db_uri =
64 |   # Database setup
65 |
66 |   db = SQLDatabase.from_uri(db_uri)
67 |
68 |   # LLM setup
69 | - llm = ChatOpenAI(model="gpt-4o-mini",max_tokens=300,temperature=0.1)
70 |   llm_chart = OpenAI()
71 |
72 |   def get_schema(_):
73 |       schema_info = db.get_table_info()  # This should be a string of your SQL schema
74 |       return schema_info
75 |
76 |   def generate_sql_query(question):
77 |       schema = get_schema(None)
78 |       template_query_generation = """
@@ -90,8 +115,10 @@ def generate_sql_query(question):
90 |           StrOutputParser()
91 |       )
92 |       sql_query = sql_chain.invoke({})
93 |       return sql_query.strip()
94 |
95 |   def run_query(query):
96 |       # Clean the query by removing markdown symbols and trimming whitespace
97 |       clean_query = query.replace("```sql", "").replace("```", "").strip()
@@ -103,6 +130,7 @@ def run_query(query):
103 |           print(f"Error executing query: {e}")
104 |           return None
105 |
106 |   # Define the database query tool
107 |   # The function that uses the above models
108 |   # Define the function that will handle the database query
@@ -112,53 +140,55 @@ def database_tool(question):
112 |       print(sql_query)
113 |       return run_query(sql_query)
114 |
115 |   def get_ASN_data(question):
116 | -
117 | -
118 | -
119 | -
120 | -
121 | -
122 | -
123 | -
124 | -
125 | -
126 | -
127 | -
128 | -
129 | -
130 | -
131 | -
132 | -
133 | -
134 | -
135 | -
136 | -
137 | -
138 | -
139 | -
140 | -
141 | -
142 | -
143 | -
144 | -
145 | -
146 | -
147 | -
148 | -
149 | -
150 | -
151 | -
152 | -
153 | -
154 | -
155 | -
156 | -
157 | -
158 | -
159 | -
160 | -
161 | -
162 |
163 |   def load_and_split_pdf(pdf_path):
164 |       reader = PdfReader(pdf_path)
@@ -168,16 +198,22 @@ def load_and_split_pdf(pdf_path):
168 |       text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
169 |       texts = text_splitter.split_text(text)
170 |       return texts
171 |   def create_vector_store(texts):
172 | -
173 | -
174 | -
175 |
176 | -
177 |       docs = vector_store.similarity_search(query, k=5)
178 |       print(f"Vector store return: {docs}")
179 |       return docs
180 |
181 |   def summarize_document(docs):
182 |       summarized_docs = []
183 |       for doc in docs:
@@ -194,23 +230,26 @@ def summarize_document(docs):
194 |           summarized_docs.append(summarized_content)
195 |       return '\n\n'.join(summarized_docs)
196 |
197 |   texts = load_and_split_pdf(pdf_path)
198 |   vector_store = create_vector_store(texts)
199 |
200 |   def document_data_tool(question):
201 |       print(f"Document data tool enter: {question}")
202 |       # query_string = question['tags'][0] if 'tags' in question and question['tags'] else ""
203 | -     query_response = query_vector_store(vector_store, question)
204 |       print("query****")
205 |       print(query_response)
206 | -     #summarized_response = summarize_document(query_response)
207 | -     #print("summary***")
208 | -     #print(summarized_response)
209 |       return query_response
210 | -
211 |   def send_email_with_attachment(recipient_email, subject, body, attachment_path):
212 | -     sender_email =
213 | -     sender_password = "
214 |
215 |       # Create a multipart message
216 |       msg = MIMEMultipart()
@@ -245,118 +284,111 @@ def send_email_with_attachment(recipient_email, subject, body, attachment_path):
245 |       text = msg.as_string()
246 |       server.sendmail(sender_email, recipient_email, text)
247 |       server.quit()
248 | -     #return 1
249 |
250 |   def make_api_request(url, params):
251 | -
252 | -
253 | -
254 | -
255 | -
256 | -
257 | -
258 | -
259 | -
260 | -
261 | -
262 | -
263 | -
264 | -
265 | -     #fetch warehouse ID
266 | -     {
267 | -         "url": "http://193.203.162.39:9090/nxt-wms/userWarehouse/fetchWarehouseForUserId?",
268 | -         "params": {"query": name, "userId": "164"}
269 | -     },
270 | -
271 | -     #Stock summary based on warehouse id
272 | -     {
273 | -         "url": "http://193.203.162.39:9090/nxt-wms/transactionHistory/stockSummary?",
274 | -         "params": {"branchId": "343", "onDate": "2024-08-09", "warehouseId" : warehouse_id }
275 | -     }
276 | - ]
277 |
278 |   def inventory_report(question):
279 |
280 | -     # Split the question to extract warehouse name, user question, and optional email
281 | -     parts = question.split(":", 2)
282 | -     name = parts[0].strip()
283 | -     user_question = parts[1].strip()
284 | -     user_email = parts[2].strip() if len(parts) > 2 else None
285 | -     print(f"Warehouse: {name}, Email: {user_email}, Question: {user_question}")
286 | -
287 | -
288 | -     data = make_api_request(apis[0]["url"], apis[0]["params"])
289 | -     if data:
290 | -         #print(data)
291 | -         # Extracting the id for the warehouse with the name "WH"
292 | -         warehouse_id = next((item['id'] for item in data['result'] if item['name'] == name), None)
293 | -
294 | -         #print(f"The id for the warehouse named {name} is: {warehouse_id}")
295 | -         #Step 3: Update the placeholder with the actual warehouse_id
296 | -         for api in apis:
297 | -             if "warehouseId" in api["params"]:
298 | -                 api["params"]["warehouseId"] = warehouse_id
299 | -
300 | -
301 | -         data1 = make_api_request(apis[1]["url"], apis[1]["params"])
302 | -
303 | -         from tabulate import tabulate
304 | -
305 | -
306 | -         headers = ["S.No","Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name", "Item Code", "Item Name", "Currency", "EAN", "UOM", "Quantity", "Gross Weight", "Volume", "Total Value"]
307 | -         table_data = []
308 | -
309 | -         for index, item in enumerate(data1['result'], start=1):
310 | -             row = [
311 | -                 index,  # Serial number
312 | -                 item['warehouse']['code'],
313 | -                 item['warehouse']['name'],
314 | -                 item['customer']['code'],
315 | -                 item['customer']['name'],
316 | -                 item['skuMaster']['code'],
317 | -                 item['skuMaster']['name'],
318 | -                 item['currency']['code'],
319 | -                 item['eanUpc'],
320 | -                 item['uom']['code'],
321 | -                 item['totalQty'],
322 | -                 item['grossWeight'],
323 | -                 item['volume'],
324 | -                 item['totalValue']
325 | -             ]
326 | -             table_data.append(row)
327 | -
328 | -
329 | -         # Convert to pandas DataFrame
330 | -         df = pd.DataFrame(table_data, columns=headers)
331 | -
332 | -         sdf = SmartDataframe(df, config={"llm": llm_chart})
333 | -
334 | -         #chart = sdf.chat("Can you draw a bar chart with all avaialble item name and quantity.")
335 | -         chart = sdf.chat(question)
336 | -
337 | -         #email send
338 | -         if user_email:
339 | -             # Send email with the chart image attached
340 | -             send_email_with_attachment(
341 | -                 recipient_email=user_email,
342 | -                 subject="Warehouse Inventory Report",
343 | -                 body="Please find the attached bar chart report for the warehouse inventory analysis.",
344 | -                 #attachment_path=chart_path
345 | -                 attachment_path="/home/user/app/exports/charts/temp_chart.png"
346 | -             )
347 |
348 | -
349 | -
350 |
351 |   # Define input and output models using Pydantic
352 |   class QueryInput(BaseModel):
353 | -     question: str = Field(
354 |       # config: dict = Field(default={}, description="Optional configuration for the database query.")
355 |
356 |
357 |   # Define the output model for database queries
358 |   class QueryOutput(BaseModel):
359 | -     result: str = Field(...,
360 |
361 |   # Wrap the function with StructuredTool for better parameter handling
362 |   tools = [
@@ -368,11 +400,11 @@ tools = [
368 |         description="Tool to get details of ASN api. ASN id will be in the input with the format of first three letters as ASN and it is followed by 11 digit numeral. Pass only the id as input. Do not send the complete user question to the tool. If there are any other queries related to ASN without ASN id, please use the document tool."
369 |     ),
370 |     StructuredTool(
371 | -
372 | -
373 | -
374 | -
375 | -
376 |     ),
377 |     StructuredTool(
378 |         func=database_tool,
@@ -386,17 +418,14 @@ tools = [
386 |         name="dataVisualization",
387 |         args_schema=QueryInput,
388 |         output_schema=QueryOutput,
389 | -       description
390 | -       Tool to generate a visual output (such as a bar chart) for a particular warehouse based on the provided question.
391 |         This tool processes the user question to identify the warehouse name and the specific request. If the user specifies
392 |         an email, include the email in the input. The input format should be: 'warehouse name: user question: email (if any)'.
393 |         The tool generates the requested chart and sends it to the provided email if specified.
394 | -
395 |         Examples:
396 | -       1. Question without email: "Analyze item name and quantity in a bar chart in warehouse
397 | -          Input to tool: "
398 | -
399 | -       2. Question with email: "Analyze item name and quantity in a bar chart in warehouse Allcargo Logistics report to send email to example@example.com"
400 |            Input to tool: "Allcargo Logistics: I want to analyze item name and quantity in a bar chart: example@example.com"
401 |         """
402 |     )
@@ -415,30 +444,35 @@ llm = llm.bind()
415 |   agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
416 |   agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
417 |
418 |   def ensure_temp_chart_dir():
419 | -     temp_chart_dir = "
420 |       if not os.path.exists(temp_chart_dir):
421 |           os.makedirs(temp_chart_dir)
422 |
423 |   def clean_gradio_tmp_dir():
424 | -     tmp_dir = "
425 |       if os.path.exists(tmp_dir):
426 |           try:
427 |               shutil.rmtree(tmp_dir)
428 |           except Exception as e:
429 |               print(f"Error cleaning up /tmp/gradio/ directory: {e}")
430 |
431 |   # Define the interface function
432 |   max_iterations = 5
433 |   iterations = 0
434 |
435 |   def answer_question(user_question, chatbot, audio=None):
436 |       global iterations
437 |       iterations = 0
438 |       # Ensure the temporary chart directory exists
439 | -     #ensure_temp_chart_dir()
440 |       # Clean the /tmp/gradio/ directory
441 | -     #clean_gradio_tmp_dir()
442 |       # Handle audio input if provided
443 |       if audio is not None:
444 |           sample_rate, audio_data = audio
@@ -464,15 +498,17 @@ def answer_question(user_question, chatbot, audio=None):
464 |
465 |       while iterations < max_iterations:
466 |           print(user_question)
467 | -         if "send email to" in user_question:
468 | -
469 | -
470 | -
471 | -
472 | -
473 | -
474 | -
475 | -
476 |           if isinstance(response, dict):
477 |               response_text = response.get("output", "")
478 |           else:
@@ -483,45 +519,53 @@ def answer_question(user_question, chatbot, audio=None):
483 |
484 |       if iterations == max_iterations:
485 |           return "The agent could not generate a valid response within the iteration limit."
486 | -
487 | -
488 |           # Open the image file
489 | -         img = Image.open(
490 | -
491 |           # Convert the PIL Image to a base64 encoded string
492 |           buffered = BytesIO()
493 |           img.save(buffered, format="PNG")
494 |           img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
495 | -         #print(img_str)
496 | -         img =
497 | -         #image = gr.Image(value=img_str)
498 | -         chatbot.append((user_question,img))
499 | -         #print(chatbot)
500 | -
501 |           if "send email to" in user_question:
502 |               try:
503 | -                 os.remove(
504 |               except Exception as e:
505 |                   print(f"Error cleaning up image file: {e}")
506 |       except Exception as e:
507 |           print(f"Error loading image file: {e}")
508 |           chatbot.append((user_question, "Chart generation failed. Please try again."))
509 |           return gr.update(value=chatbot)
510 | -
511 |
512 | -
513 | -
514 |       else:
515 |           chatbot.append((user_question, response_text))
516 | -         #print(chatbot)
517 |           return gr.update(value=chatbot)
518 | -     #response_text = response_text.replace('\n', ' ').replace(' ', ' ').strip()
519 | -     #return response_text
520 | -
521 | - import gradio as gr
522 |
523 |   css = """
524 | -
525 |   .gr-chatbot {
526 |       /* Custom styles for the Chatbot component */
527 |       border: 1px solid #ccc;
@@ -532,63 +576,115 @@ css = """
532 |       /* Adjust the width as needed */
533 |       overflow-y: auto; /* Add scroll if the content exceeds the height */
534 |   }
535 | -
536 | -
537 |   .gr-button {
538 |       height: 40px; /* Adjust the height as needed */
539 |
540 |   """
541 |
542 |   def submit_feedback(feedback, chatbot):
543 |       feedback_response = "User feedback: " + feedback
544 | -     return chatbot + [(feedback_response,None)], gr.update(visible=False)
545 |
546 |   def handle_dislike(data: gr.LikeData):
547 |       if not data.liked:
548 |           print("downvote")
549 |           return gr.update(visible=True), gr.update(visible=True)
550 |       else:
551 |           print("upvote")
552 |           return gr.update(visible=False), gr.update(visible=False)
553 |
554 |
555 | -
556 | -
557 |   with gr.Blocks(css=css) as demo:
558 |       gr.Markdown("<CENTER><h2 style='font-size: 20px; font-family: Calibri;'>NewageNXT GPT</h2></CENTER>")
559 | -
560 | -
561 |       with gr.Row():
562 |           with gr.Column(scale=1):
563 | -             message = gr.Textbox(show_label=False)
564 | -
565 | -
566 |               with gr.Row():
567 |                   button = gr.Button("Submit", elem_classes="gr-button")
568 |                   gr.ClearButton(message, elem_classes="gr-button")
569 | -     with gr.Row():
570 | -         with gr.Column(scale=1):
571 | -             feedback_textbox = gr.Textbox(visible=False, show_label=False)
572 | -         with gr.Column(scale=1):
573 | -             submit_feedback_button = gr.Button("Submit Feedback", visible=False, elem_classes="gr-button")
574 |
575 | -
576 | -
577 | -     message.submit(answer_question, [message, chatbot], [chatbot])
578 |       message.submit(lambda x: gr.update(value=""), None, [message], queue=False)
579 |       button.click(lambda x: gr.update(value=''), [], [message])
580 | -
581 | -
582 |
583 | -     chatbot.like(handle_dislike,None, outputs=[feedback_textbox, submit_feedback_button])
584 | -     submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot],
585 |       submit_feedback_button.click(lambda x: gr.update(value=''), [], [feedback_textbox])
586 | -
587 |
588 |
589 | -
590 |
591 | -
592 | -
593 | -     demo.launch()
594 |
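The updated file is listed below with its new line numbers, + marking the lines this commit adds. That code also calls several names (os, re, requests, gr, ChatOpenAI, SQLDatabase, OpenAIEmbeddings, FAISS, RecursiveCharacterTextSplitter, create_tool_calling_agent, AgentExecutor) whose imports sit in unchanged lines the diff does not display (roughly new lines 4-10 and 28-34). A minimal sketch of what that hidden import block presumably contains; the exact module paths are assumptions, only the StructuredTool import and "import time" are confirmed by the hunk headers above:

import os
import re
import time
import requests
import gradio as gr
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.utilities import SQLDatabase
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.agents import AgentExecutor, create_tool_calling_agent
from langchain.tools import StructuredTool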
1 |   from PIL import Image
2 |   import base64
3 |   from io import BytesIO
11 |   import shutil
12 |   import json
13 |   import nltk
14 | + # audio package
15 |   import speech_recognition as sr
16 |   from pydub import AudioSegment
17 |   from pydub.playback import play
18 | + # email library
19 |   import smtplib
20 |   from email.mime.multipart import MIMEMultipart
21 |   from email.mime.text import MIMEText
22 |   from email.mime.base import MIMEBase
23 |   from email import encoders
24 | + # langchain
25 |   from langchain_core.prompts import ChatPromptTemplate
26 |   from langchain_core.output_parsers import StrOutputParser
27 |   from langchain_core.runnables import RunnableSequence, RunnableLambda
35 |   from langchain.pydantic_v1 import BaseModel, Field
36 |   from PyPDF2 import PdfReader
37 |   from nltk.tokenize import sent_tokenize
38 | + from datetime import datetime
39 |   from sqlalchemy import create_engine
40 |   from sqlalchemy.sql import text
41 |
42 | + # pandas
43 |   import pandas as pd
44 |   from pandasai.llm.openai import OpenAI
45 |   from pandasai import SmartDataframe
46 | + from dotenv import load_dotenv
47 |
48 | + # Load environment variables
49 | + load_dotenv()
50 |
51 | + # langfuse analytics
52 | + from langfuse.callback import CallbackHandler
53 |
54 | + # LangFuse API keys and host settings
55 | + os.environ["LANGFUSE_PUBLIC_KEY"] = os.getenv("LANGFUSE_PUBLIC_KEY")
56 | + os.environ["LANGFUSE_SECRET_KEY"] = os.getenv("LANGFUSE_SECRET_KEY")
57 | + os.environ["LANGFUSE_HOST"] = os.getenv("LANGFUSE_HOST")
58 | +
59 | + langfuse_handler = CallbackHandler()
60 | + langfuse_handler.auth_check()  # Optional: Checks if the authentication is successful
61 |
62 |   nltk.download('punkt')
63 |
64 | + open_api_key_token = os.getenv("OPENAI_API_KEY")
65 |
66 |   os.environ['OPENAI_API_KEY'] = open_api_key_token
67 | + pdf_path = "Inbound.pdf"
68 |
69 | + db_uri = os.getenv("POSTGRESQL_CONNECTION")
70 |   # Database setup
71 |
72 |   db = SQLDatabase.from_uri(db_uri)
73 | + user_email = "lakshmivairamani@gmail.com"
74 | + warehouse_name = ""
75 | + warehouse_id = ""
76 | + inventory_date = datetime.today().strftime('%Y-%m-%d')
77 | + apis = [
78 | +     # fetch warehouse ID
79 | +     {
80 | +         "url": "http://193.203.162.39:9090/nxt-wms/userWarehouse/fetchWarehouseForUserId?",
81 | +         "params": {"query": warehouse_name, "userId": 164}
82 | +     },
83 | +
84 | +     # Stock summary based on warehouse id
85 | +     {
86 | +         "url": "http://193.203.162.39:9090/nxt-wms/transactionHistory/stockSummary?",
87 | +         "params": {"branchId": 343, "onDate": inventory_date, "warehouseId": warehouse_id}
88 | +     }
89 | + ]
90 |
91 |   # LLM setup
92 | + llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=300, temperature=0.1)
93 |   llm_chart = OpenAI()
94 |
95 | +
96 |   def get_schema(_):
97 |       schema_info = db.get_table_info()  # This should be a string of your SQL schema
98 |       return schema_info
99 |
100 | +
101 |   def generate_sql_query(question):
102 |       schema = get_schema(None)
103 |       template_query_generation = """
115 |           StrOutputParser()
116 |       )
117 |       sql_query = sql_chain.invoke({})
118 | +     sql_query = sql_chain.invoke({}, config={"callbacks": [langfuse_handler]})
119 |       return sql_query.strip()
120 |
121 | +
122 |   def run_query(query):
123 |       # Clean the query by removing markdown symbols and trimming whitespace
124 |       clean_query = query.replace("```sql", "").replace("```", "").strip()
130 |           print(f"Error executing query: {e}")
131 |           return None
132 |
133 | +
134 |   # Define the database query tool
135 |   # The function that uses the above models
136 |   # Define the function that will handle the database query
140 |       print(sql_query)
141 |       return run_query(sql_query)
142 |
143 | +
144 |   def get_ASN_data(question):
145 | +     # print(question)
146 | +     base_url = os.getenv("ASN_API_URL")
147 | +     complete_url = f"{base_url}branchMaster.id=343&transactionUid={question}&userId=164&transactionType=ASN"
148 | +     print("complete url")
149 | +     print(complete_url)
150 | +     try:
151 | +         response = requests.get(complete_url)
152 | +         data = response.json()
153 | +         response.raise_for_status()
154 | +
155 | +         if 'result' in data and 'content' in data['result'] and data['result']['content']:
156 | +             content = data['result']['content'][0]
157 | +             trnHeaderAsn = content['trnHeaderAsn']
158 | +             party = content['party'][0]
159 | +
160 | +             transactionUid = trnHeaderAsn['transactionUid']
161 | +             customerOrderNo = trnHeaderAsn.get('customerOrderNo', 'N/A')
162 | +             orderDate = trnHeaderAsn.get('orderDate', 'N/A')
163 | +             customerInvoiceNo = trnHeaderAsn.get('customerInvoiceNo', 'N/A')
164 | +             invoiceDate = trnHeaderAsn.get('invoiceDate', 'N/A')
165 | +             expectedReceivingDate = trnHeaderAsn['expectedReceivingDate']
166 | +             transactionStatus = trnHeaderAsn['transactionStatus']
167 | +             shipper_code = party['shipper']['code'] if party['shipper'] else 'N/A'
168 | +             shipper_name = party['shipper']['name'] if party['shipper'] else 'N/A'
169 | +
170 | +             data = [
171 | +                 ["Transaction UID", transactionUid],
172 | +                 ["Customer Order No", customerOrderNo],
173 | +                 ["Order Date", orderDate],
174 | +                 ["Customer Invoice No", customerInvoiceNo],
175 | +                 ["Invoice Date", invoiceDate],
176 | +                 ["Expected Receiving Date", expectedReceivingDate],
177 | +                 ["Transaction Status", transactionStatus],
178 | +                 ["Shipper Code", shipper_code],
179 | +                 ["Shipper Name", shipper_name]
180 | +             ]
181 | +             return f"The ASN details of {question} is {data}."
182 | +         else:
183 | +             return "ASN Details are not found. Please contact system administrator."
184 | +
185 | +     except requests.exceptions.HTTPError as http_err:
186 | +         print(f"HTTP error occurred: {http_err}")
187 | +     except Exception as err:
188 | +         print(f"An error occurred: {err}")
189 | +
190 | +
191 | + # get_ASN_data("ASN24072400001")
192 |
193 |   def load_and_split_pdf(pdf_path):
194 |       reader = PdfReader(pdf_path)
198 |       text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
199 |       texts = text_splitter.split_text(text)
200 |       return texts
201 | +
202 | +
203 |   def create_vector_store(texts):
204 | +     embeddings = OpenAIEmbeddings()
205 | +     vector_store = FAISS.from_texts(texts, embeddings)
206 | +     return vector_store
207 |
208 | +
209 | + def query_vector_store(vector_store, query, config=None):
210 | +     if config:
211 | +         print("Config passed:", config)
212 |       docs = vector_store.similarity_search(query, k=5)
213 |       print(f"Vector store return: {docs}")
214 |       return docs
215 |
216 | +
217 |   def summarize_document(docs):
218 |       summarized_docs = []
219 |       for doc in docs:
230 |           summarized_docs.append(summarized_content)
231 |       return '\n\n'.join(summarized_docs)
232 |
233 | +
234 |   texts = load_and_split_pdf(pdf_path)
235 |   vector_store = create_vector_store(texts)
236 |
237 | +
238 |   def document_data_tool(question):
239 |       print(f"Document data tool enter: {question}")
240 |       # query_string = question['tags'][0] if 'tags' in question and question['tags'] else ""
241 | +     query_response = query_vector_store(vector_store, question, config={"callbacks": [langfuse_handler]})
242 |       print("query****")
243 |       print(query_response)
244 | +     # summarized_response = summarize_document(query_response)
245 | +     # print("summary***")
246 | +     # print(summarized_response)
247 |       return query_response
248 | +
249 | +
250 |   def send_email_with_attachment(recipient_email, subject, body, attachment_path):
251 | +     sender_email = os.getenv("EMAIL_SENDER")
252 | +     sender_password = os.getenv("EMAIL_PASSWORD")
253 |
254 |       # Create a multipart message
255 |       msg = MIMEMultipart()
284 |       text = msg.as_string()
285 |       server.sendmail(sender_email, recipient_email, text)
286 |       server.quit()
287 | +     # return 1
288 | +
289 |
290 |   def make_api_request(url, params):
291 | +     """Generic function to make API GET requests and return JSON data."""
292 | +     try:
293 | +
294 | +         response = requests.get(url, params=params)
295 | +         response.raise_for_status()  # Raises an HTTPError if the response was an error
296 | +         return response.json()  # Return the parsed JSON data
297 | +     except requests.exceptions.HTTPError as http_err:
298 | +         print(f"HTTP error occurred: {http_err}")
299 | +     except Exception as err:
300 | +         print(f"An error occurred: {err}")
301 | +
302 | +
303 | +
304 | +
305 |
306 |   def inventory_report(question):
307 | +     print(question)
308 | +
309 |
310 |
311 | +     # Split the question to extract warehouse name, user question, and optional email
312 | +     if question.count(":") > 0:
313 | +         parts = question.split(":", 2)
314 | +         warehouse_name= parts[0].strip()
315 | +         user_question = parts[1].strip()
316 | +         user_email = parts[2].strip() if len(parts) > 2 else None
317 | +         print(f"Warehouse: {warehouse_name}, Email: {user_email}, Question: {user_question}")
318 | +     else:
319 | +         return "warehouse name not found"
320 | +
321 | +     print(apis[0]["url"])
322 | +     print(apis[0]["params"])
323 | +     data = make_api_request(apis[0]["url"], apis[0]["params"])
324 | +     print(warehouse_name)
325 | +     if data:
326 | +         print(data)
327 | +         # Extracting the id for the warehouse with the name "WH"
328 | +         warehouse_id = next((item['id'] for item in data['result'] if item['name'] == warehouse_name), None)
329 | +         print(warehouse_id)
330 | +         if warehouse_id is None:
331 | +             return "Please provide a warehouse name available in the database."
332 | +         # print(f"The id for the warehouse named {name} is: {warehouse_id}")
333 | +         # Step 3: Update the placeholder with the actual warehouse_id
334 | +         for api in apis:
335 | +             if "warehouseId" in api["params"]:
336 | +                 api["params"]["warehouseId"] = warehouse_id
337 | +
338 | +         data1 = make_api_request(apis[1]["url"], apis[1]["params"])
339 | +
340 | +         from tabulate import tabulate
341 | +
342 | +         headers = ["S.No", "Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name", "Item Code", "Item Name",
343 | +                    "Currency", "EAN", "UOM", "Quantity", "Gross Weight", "Volume", "Total Value"]
344 | +         table_data = []
345 | +
346 | +         for index, item in enumerate(data1['result'], start=1):
347 | +             row = [
348 | +                 index,  # Serial number
349 | +                 item['warehouse']['code'],
350 | +                 item['warehouse']['name'],
351 | +                 item['customer']['code'],
352 | +                 item['customer']['name'],
353 | +                 item['skuMaster']['code'],
354 | +                 item['skuMaster']['name'],
355 | +                 item['currency']['code'],
356 | +                 item['eanUpc'],
357 | +                 item['uom']['code'],
358 | +                 item['totalQty'],
359 | +                 item['grossWeight'],
360 | +                 item['volume'],
361 | +                 item['totalValue']
362 | +             ]
363 | +             table_data.append(row)
364 | +
365 | +         # Convert to pandas DataFrame
366 | +         df = pd.DataFrame(table_data, columns=headers)
367 | +
368 | +         sdf = SmartDataframe(df, config={"llm": llm_chart})
369 | +
370 | +         # chart = sdf.chat("Can you draw a bar chart with all avaialble item name and quantity.")
371 | +         chart = sdf.chat(question)
372 | +
373 | +
374 | +
375 | +         return chart
376 | +
377 | +
378 | + # inventory_report("WH:can you give me a bar chart with item name and quantity for the warehouse WH")
379 |
380 |   # Define input and output models using Pydantic
381 |   class QueryInput(BaseModel):
382 | +     question: str = Field(
383 | +         description="The question to be answered by appropriate tool. Please follow the instructions. For API tool, do not send the question as it is. Please send the ASN id.")# Invoke datavisulaization tool by processing the user question and send two inputs to the tool. One input will be the warehouse name and another input to the tool will be the entire user_question itself. Please join those two strings and send them as a single input string with ':' as delimiter")
384 |       # config: dict = Field(default={}, description="Optional configuration for the database query.")
385 |
386 |
387 |   # Define the output model for database queries
388 |   class QueryOutput(BaseModel):
389 | +     result: str = Field(...,
390 | +         description="Display the answer based on the prompts given in each tool. For dataVisualization tool, it sends a image file as output. Please give the image file path only to the gr.Image. For DocumentData tool, Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points.")
391 | +
392 |
393 |   # Wrap the function with StructuredTool for better parameter handling
394 |   tools = [
400 |         description="Tool to get details of ASN api. ASN id will be in the input with the format of first three letters as ASN and it is followed by 11 digit numeral. Pass only the id as input. Do not send the complete user question to the tool. If there are any other queries related to ASN without ASN id, please use the document tool."
401 |     ),
402 |     StructuredTool(
403 | +       func=document_data_tool,
404 | +       name="DocumentData",
405 | +       args_schema=QueryInput,
406 | +       output_schema=QueryOutput,
407 | +       description="You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. "
408 |     ),
409 |     StructuredTool(
410 |         func=database_tool,
418 |         name="dataVisualization",
419 |         args_schema=QueryInput,
420 |         output_schema=QueryOutput,
421 | +       description=""" Tool to generate a visual chart output for a particular warehouse based on the provided question.
422 |         This tool processes the user question to identify the warehouse name and the specific request. If the user specifies
423 |         an email, include the email in the input. The input format should be: 'warehouse name: user question: email (if any)'.
424 |         The tool generates the requested chart and sends it to the provided email if specified.
425 |         Examples:
426 | +       1. Question without email, without warehouse: "Analyze item name and quantity in a bar chart in warehouse"
427 | +          Input to tool: "I want to analyze item name and quantity in a bar chart"
428 | +       2. Question with email: "Analyze item name and quantity in a bar chart in warehouse Allcargo Logistics and send email to example@example.com"
429 |            Input to tool: "Allcargo Logistics: I want to analyze item name and quantity in a bar chart: example@example.com"
430 |         """
431 |     )
444 |   agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
445 |   agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
446 |
447 | +
448 |   def ensure_temp_chart_dir():
449 | +     temp_chart_dir = os.getenv("IMAGE_MAIN_URL")
450 |       if not os.path.exists(temp_chart_dir):
451 |           os.makedirs(temp_chart_dir)
452 |
453 | +
454 |   def clean_gradio_tmp_dir():
455 | +     tmp_dir = os.getenv("IMAGE_GRADIO_PATH")
456 |       if os.path.exists(tmp_dir):
457 |           try:
458 |               shutil.rmtree(tmp_dir)
459 |           except Exception as e:
460 |               print(f"Error cleaning up /tmp/gradio/ directory: {e}")
461 |
462 | +
463 |   # Define the interface function
464 |   max_iterations = 5
465 |   iterations = 0
466 |
467 | +
468 |   def answer_question(user_question, chatbot, audio=None):
469 | +
470 |       global iterations
471 |       iterations = 0
472 |       # Ensure the temporary chart directory exists
473 | +     # ensure_temp_chart_dir()
474 |       # Clean the /tmp/gradio/ directory
475 | +     # clean_gradio_tmp_dir()
476 |       # Handle audio input if provided
477 |       if audio is not None:
478 |           sample_rate, audio_data = audio
498 |
499 |       while iterations < max_iterations:
500 |           print(user_question)
501 | +         """if "send email to" in user_question:
502 | +             email_match = re.search(r"send email to ([\w\.-]+@[\w\.-]+)", user_question)
503 | +             if email_match:
504 | +                 user_email = email_match.group(1).strip()
505 | +                 user_question = user_question.replace(f"send email to {user_email}", "").strip()
506 | +                 user_question = f"{user_question}:{user_email}"
507 | +         """
508 | +
509 | +         response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]})
510 | +         print("name")
511 | +         print(warehouse_name)
512 |           if isinstance(response, dict):
513 |               response_text = response.get("output", "")
514 |           else:
519 |
520 |       if iterations == max_iterations:
521 |           return "The agent could not generate a valid response within the iteration limit."
522 | +
523 | +
524 | +     if os.getenv("IMAGE_PATH") in response_text:
525 |           # Open the image file
526 | +         img = Image.open(os.getenv("IMAGE_PATH"))
527 | +
528 |           # Convert the PIL Image to a base64 encoded string
529 |           buffered = BytesIO()
530 |           img.save(buffered, format="PNG")
531 |           img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
532 | +         # print(img_str)
533 | +         img = f'<img src="data:image/png;base64,{img_str}" style="width:500px; height:400px;">'
534 | +         # image = gr.Image(value=img_str)
535 | +         chatbot.append((user_question, img))
536 | +         # print(chatbot)
537 | +         # email send
538 | +         if user_email:
539 | +             # Send email with the chart image attached
540 | +             send_email_with_attachment(
541 | +                 recipient_email=user_email,
542 | +                 subject="Warehouse Inventory Report",
543 | +                 body=response.get("output", "").split(":")[0],
544 | +                 # attachment_path=chart_path
545 | +                 attachment_path=os.getenv("IMAGE_PATH")
546 | +             )
547 | +
548 |           if "send email to" in user_question:
549 |               try:
550 | +                 os.remove(img)  # Clean up the temporary image file
551 |               except Exception as e:
552 |                   print(f"Error cleaning up image file: {e}")
553 |       except Exception as e:
554 |           print(f"Error loading image file: {e}")
555 |           chatbot.append((user_question, "Chart generation failed. Please try again."))
556 |           return gr.update(value=chatbot)
557 |
558 | +
559 | +     # return [(user_question,gr.Image("/home/user/app/exports/charts/temp_chart.png"))]
560 | +     # return "/home/user/app/exports/charts/temp_chart.png"
561 |       else:
562 |           chatbot.append((user_question, response_text))
563 | +         # print(chatbot)
564 |           return gr.update(value=chatbot)
565 | +     # response_text = response_text.replace('\n', ' ').replace(' ', ' ').strip()
566 | +     # return response_text
567 |
568 |   css = """
569 |   .gr-chatbot {
570 |       /* Custom styles for the Chatbot component */
571 |       border: 1px solid #ccc;
576 |       /* Adjust the width as needed */
577 |       overflow-y: auto; /* Add scroll if the content exceeds the height */
578 |   }
579 |   .gr-button {
580 |       height: 40px; /* Adjust the height as needed */
581 | +
582 | + }
583 | + .message-buttons-right{
584 | +     display: none !important;
585 | + }
586 | + .gr-audio {
587 | +     width: 30px;
588 | +     height: 30px;
589 | +     border-radius: 50%;
590 | +
591 | +     background-size: cover;
592 | +     background-position: center;
593 | +     margin-left: 10px;
594 | +     cursor: pointer;
595 | + }
596 | + .custom-button {
597 | +     background-color: #4CAF50; /* Initial Green */
598 | +     border: none;
599 | +     color: black;
600 | +     padding: 15px 32px;
601 | +     text-align: center;
602 | +     text-decoration: none;
603 | +     display: inline-block;
604 | +     font-size: 11px;
605 | +     margin: 4px 2px;
606 | +     width: 50px; /* Set width to 50px */
607 | +     height: 100px; /* Set height to 50px */
608 | +     transition: background-color 0.4s, transform 0.5s ease-in-out; /* Smooth transition */
609 | +     cursor: pointer;
610 | + }
611 |
612 | + .custom-button:hover {
613 | +     background-color: #45a049; /* Darker Green on hover */
614 | +     transform: scale(1.1); /* Scale up slightly on hover */
615 | + }
616 | +
617 | + @keyframes button-animation {
618 | +     0% { transform: scale(1); }
619 | +     50% { transform: scale(1.1); }
620 | +     100% { transform: scale(1); }
621 | + }
622 |   """
623 |
624 |   def submit_feedback(feedback, chatbot):
625 | +     gr.Info("Thank you for your feedback.")
626 |       feedback_response = "User feedback: " + feedback
627 | +     return chatbot + [(feedback_response, None)], gr.update(visible=False)
628 |
629 |   def handle_dislike(data: gr.LikeData):
630 |       if not data.liked:
631 |           print("downvote")
632 | +         gr.Info("Please enter your feedback.")
633 |           return gr.update(visible=True), gr.update(visible=True)
634 |       else:
635 |           print("upvote")
636 |           return gr.update(visible=False), gr.update(visible=False)
637 |
638 |
639 |   with gr.Blocks(css=css) as demo:
640 |       gr.Markdown("<CENTER><h2 style='font-size: 20px; font-family: Calibri;'>NewageNXT GPT</h2></CENTER>")
641 | +     with gr.Row():
642 | +         sample_button = gr.Button("What are the details of ASN24080600008", elem_classes="custom-button")
643 | +         sample_button1 = gr.Button("What are the active warehouses", elem_classes="custom-button")
644 | +         sample_button2 = gr.Button("Explain Pre-Receiving Yard Management", elem_classes="custom-button")
645 | +         sample_button3 = gr.Button("Can you generate a pie chart with item names and quantities in warehouse WH", elem_classes="custom-button")
646 | +         sample_button4 = gr.Button("I want to analyze item name and quantity in a bar chart in warehouse WH and send email to lakshmivairamani@gmail.comk", elem_classes="custom-button")
647 | +
648 | +     with gr.Row():
649 | +         chatbot = gr.Chatbot(elem_classes="gr-chatbot",
650 | +                              label="Ask a question about the API, Database, a Document or Warehouse inventory analysis.")  # .style(color_map=["blue","grey","red"])
651 | +
652 | +
653 |       with gr.Row():
654 |           with gr.Column(scale=1):
655 | +             message = gr.Textbox(show_label=False, container=False, placeholder="Please enter your question")
656 | +             with gr.Row():
657 | +                 feedback_textbox = gr.Textbox(visible=False, show_label=False,container=False, placeholder = "Please enter your feedback.")
658 | +                 submit_feedback_button = gr.Button("Submit Feedback", visible=False, elem_classes="gr-button")
659 | +         with gr.Column(scale=2):
660 |               with gr.Row():
661 | +                 audio_input = gr.Audio(sources=["microphone"], show_label=False, container=False,
662 | +                                        waveform_options=gr.WaveformOptions(show_controls=False))
663 |                   button = gr.Button("Submit", elem_classes="gr-button")
664 |                   gr.ClearButton(message, elem_classes="gr-button")
665 |
666 | +     button.click(answer_question, [message, chatbot, audio_input], [chatbot])
667 | +
668 | +     message.submit(answer_question, [message, chatbot, audio_input], [chatbot])
669 |       message.submit(lambda x: gr.update(value=""), None, [message], queue=False)
670 |       button.click(lambda x: gr.update(value=''), [], [message])
671 |
672 | +     chatbot.like(handle_dislike, None, outputs=[feedback_textbox, submit_feedback_button])
673 | +     submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot],
674 | +                                  [chatbot, feedback_textbox, submit_feedback_button])
675 |       submit_feedback_button.click(lambda x: gr.update(value=''), [], [feedback_textbox])
676 |
677 | +     sample_button.click(
678 | +         answer_question,[sample_button, chatbot, audio_input], [chatbot])
679 | +     sample_button1.click(
680 | +         answer_question, [sample_button1, chatbot, audio_input], [chatbot])
681 | +     sample_button2.click(
682 | +         answer_question, [sample_button2, chatbot, audio_input], [chatbot])
683 | +     sample_button3.click(
684 | +         answer_question, [sample_button3, chatbot, audio_input], [chatbot])
685 | +     sample_button4.click(
686 | +         answer_question, [sample_button4, chatbot, audio_input], [chatbot])
687 |
688 |
689 |
690 | + demo.launch()
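Every credential and path in the updated script is resolved with os.getenv after load_dotenv(), so the Space needs these variables in a .env file or as Space secrets. This is a sketch only: the variable names come from the os.getenv calls above, while every value is a placeholder (the IMAGE_* entries, for instance, presumably point at the pandasai chart export path mentioned in the commented-out code):

OPENAI_API_KEY=sk-...
POSTGRESQL_CONNECTION=postgresql+psycopg2://user:password@host:5432/dbname
LANGFUSE_PUBLIC_KEY=pk-lf-...
LANGFUSE_SECRET_KEY=sk-lf-...
LANGFUSE_HOST=https://cloud.langfuse.com
ASN_API_URL=http://<wms-host>:9090/nxt-wms/<asn-endpoint>?
EMAIL_SENDER=sender@example.com
EMAIL_PASSWORD=app-password
IMAGE_MAIN_URL=/home/user/app/exports/charts/
IMAGE_PATH=/home/user/app/exports/charts/temp_chart.png
IMAGE_GRADIO_PATH=/tmp/gradio/

The newly added imports likewise imply python-dotenv and langfuse entries in the Space's requirements.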