Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -11,18 +11,20 @@ import time
|
|
11 |
import shutil
|
12 |
import json
|
13 |
import nltk
|
14 |
-
|
|
|
|
|
15 |
# audio package
|
16 |
-
|
17 |
from pydub import AudioSegment
|
18 |
-
from pydub.playback import play
|
19 |
-
#
|
20 |
# email library
|
21 |
-
|
22 |
from email.mime.multipart import MIMEMultipart
|
23 |
from email.mime.text import MIMEText
|
24 |
from email.mime.base import MIMEBase
|
25 |
-
from email import encoders
|
26 |
# langchain
|
27 |
from langchain_core.prompts import ChatPromptTemplate
|
28 |
from langchain_core.output_parsers import StrOutputParser
|
@@ -34,12 +36,14 @@ from langchain_community.utilities import SQLDatabase
|
|
34 |
from langchain.agents import create_tool_calling_agent, AgentExecutor, Tool
|
35 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
36 |
from langchain.tools import StructuredTool
|
37 |
-
from langchain.pydantic_v1 import BaseModel, Field
|
|
|
38 |
from PyPDF2 import PdfReader
|
39 |
from nltk.tokenize import sent_tokenize
|
40 |
from datetime import datetime
|
41 |
from sqlalchemy import create_engine
|
42 |
from sqlalchemy.sql import text
|
|
|
43 |
|
44 |
# pandas
|
45 |
import pandas as pd
|
@@ -64,8 +68,24 @@ import threading
|
|
64 |
from mailjet_rest import Client
|
65 |
import base64
|
66 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
# Define global variables for managing the thread and current_event
|
68 |
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
|
|
|
69 |
current_event = None
|
70 |
stop_event = threading.Event()
|
71 |
|
@@ -74,19 +94,25 @@ os.environ["LANGFUSE_PUBLIC_KEY"] = os.getenv("LANGFUSE_PUBLIC_KEY")
|
|
74 |
os.environ["LANGFUSE_SECRET_KEY"] = os.getenv("LANGFUSE_SECRET_KEY")
|
75 |
os.environ["LANGFUSE_HOST"] = os.getenv("LANGFUSE_HOST")
|
76 |
|
|
|
|
|
|
|
|
|
|
|
|
|
77 |
langfuse_handler = CallbackHandler()
|
78 |
langfuse_handler.auth_check() # Optional: Checks if the authentication is successful
|
79 |
|
80 |
nltk.download('punkt')
|
81 |
|
82 |
-
open_api_key_token = os.getenv("
|
83 |
|
84 |
os.environ['OPENAI_API_KEY'] = open_api_key_token
|
85 |
pdf_path = "Inbound.pdf"
|
86 |
|
87 |
db_uri = os.getenv("POSTGRESQL_CONNECTION")
|
88 |
-
# Database setup
|
89 |
|
|
|
90 |
db = SQLDatabase.from_uri(db_uri)
|
91 |
|
92 |
user_email = ""
|
@@ -94,6 +120,7 @@ warehouse_name = ""
|
|
94 |
warehouse_id = ""
|
95 |
# Today's date to be populated in inventory API
|
96 |
inventory_date = datetime.today().strftime('%Y-%m-%d')
|
|
|
97 |
apis = [
|
98 |
# fetch warehouse ID
|
99 |
{
|
@@ -112,7 +139,6 @@ apis = [
|
|
112 |
llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=300, temperature=0.1)
|
113 |
llm_chart = OpenAI()
|
114 |
|
115 |
-
|
116 |
def get_schema(_):
|
117 |
schema_info = db.get_table_info() # This should be a string of your SQL schema
|
118 |
return schema_info
|
@@ -161,9 +187,12 @@ def database_tool(question):
|
|
161 |
|
162 |
def get_ASN_data(question):
|
163 |
base_url = os.getenv("ASN_API_URL")
|
|
|
164 |
complete_url = f"{base_url}branchMaster.id=343&transactionUid={question}&userId=164&transactionType=ASN"
|
165 |
try:
|
166 |
response = requests.get(complete_url)
|
|
|
|
|
167 |
data = response.json()
|
168 |
response.raise_for_status()
|
169 |
|
@@ -246,6 +275,10 @@ def summarize_document(docs):
|
|
246 |
texts = load_and_split_pdf(pdf_path)
|
247 |
vector_store = create_vector_store(texts)
|
248 |
|
|
|
|
|
|
|
|
|
249 |
|
250 |
def document_data_tool(question):
|
251 |
print(f"Document data tool enter: {question}")
|
@@ -256,8 +289,6 @@ def document_data_tool(question):
|
|
256 |
|
257 |
# mailjet API since SMTP is not supported HF spaces
|
258 |
def send_email_with_attachment_mailjet(recipient_email, subject, body, attach_img_base64=None):
|
259 |
-
|
260 |
-
|
261 |
api_key = os.getenv("MAILJET_API_KEY")
|
262 |
api_secret = os.getenv("MAILJET_API_SECRET")
|
263 |
|
@@ -350,8 +381,6 @@ def send_email_with_attachment(recipient_email, subject, body, attachment_path):
|
|
350 |
def make_api_request(url, params):
|
351 |
"""Generic function to make API GET requests and return JSON data."""
|
352 |
try:
|
353 |
-
print(url)
|
354 |
-
print(params)
|
355 |
response = requests.get(url, params=params)
|
356 |
response.raise_for_status() # Raises an HTTPError if the response was an error
|
357 |
return response.json() # Return the parsed JSON data
|
@@ -377,18 +406,14 @@ def inventory_report(question):
|
|
377 |
if data:
|
378 |
# Extracting the id for the warehouse with the name "WH"
|
379 |
warehouse_id = next((item['id'] for item in data['result'] if item['wareHouseId'] == warehouse_name), None)
|
380 |
-
|
381 |
-
if (warehouse_id):
|
382 |
-
|
383 |
-
# print(f"The id for the warehouse named {name} is: {warehouse_id}")
|
384 |
# Step 3: Update the placeholder with the actual warehouse_id
|
385 |
for api in apis:
|
386 |
if "warehouseId" in api["params"]:
|
387 |
api["params"]["warehouseId"] = warehouse_id
|
388 |
-
|
389 |
-
print(f"warehouseId: {warehouse_id}")
|
390 |
-
print(f"warehouseId: {apis[1]}")
|
391 |
-
|
392 |
data1 = make_api_request(apis[1]["url"], apis[1]["params"])
|
393 |
if (data1):
|
394 |
headers = ["S.No", "Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name", "Item Code", "Item Name",
|
@@ -416,20 +441,25 @@ def inventory_report(question):
|
|
416 |
|
417 |
# Convert to pandas DataFrame
|
418 |
df = pd.DataFrame(table_data, columns=headers)
|
|
|
|
|
419 |
|
420 |
-
|
421 |
-
|
422 |
-
# chart = sdf.chat("Can you draw a bar chart with all avaialble item name and quantity.")
|
423 |
-
chart = sdf.chat(question)
|
424 |
-
|
425 |
-
return chart
|
426 |
else:
|
427 |
return "There are no inventory details for the warehouse you have given."
|
428 |
else:
|
429 |
return "Please provide a warehouse name available in the database."
|
430 |
|
|
|
|
|
|
|
|
|
431 |
|
432 |
-
|
|
|
|
|
|
|
|
|
433 |
|
434 |
# Define input and output models using Pydantic
|
435 |
class QueryInput(BaseModel):
|
@@ -446,6 +476,7 @@ class QueryOutput(BaseModel):
|
|
446 |
|
447 |
# Wrap the function with StructuredTool for better parameter handling
|
448 |
tools = [
|
|
|
449 |
StructuredTool(
|
450 |
func=get_ASN_data,
|
451 |
name="APIData",
|
@@ -486,6 +517,7 @@ tools = [
|
|
486 |
]
|
487 |
|
488 |
prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
|
|
|
489 |
For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
|
490 |
You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
|
491 |
For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
|
@@ -493,18 +525,13 @@ For datavisualization, user will ask for inventory report of a particular wareho
|
|
493 |
{{agent_scratchpad}}
|
494 |
Here is the information you need to process:
|
495 |
Question: {{input}}"""
|
496 |
-
|
497 |
-
llm = llm.bind()
|
498 |
-
agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
|
499 |
-
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
500 |
-
|
501 |
|
502 |
def ensure_temp_chart_dir():
|
503 |
temp_chart_dir = os.getenv("IMAGE_MAIN_URL")
|
504 |
if not os.path.exists(temp_chart_dir):
|
505 |
os.makedirs(temp_chart_dir)
|
506 |
|
507 |
-
|
508 |
def clean_gradio_tmp_dir():
|
509 |
tmp_dir = os.getenv("IMAGE_GRADIO_PATH")
|
510 |
if os.path.exists(tmp_dir):
|
@@ -529,9 +556,9 @@ def handle_query(user_question, chatbot, audio=None):
|
|
529 |
# Clear previous stop event and current_event
|
530 |
stop_event.clear()
|
531 |
|
532 |
-
|
533 |
-
|
534 |
-
|
535 |
|
536 |
# Start the processing in a new thread
|
537 |
current_event = executor.submit(answer_question_thread, user_question, chatbot)
|
@@ -539,7 +566,13 @@ def handle_query(user_question, chatbot, audio=None):
|
|
539 |
# Periodically check if current_event is done
|
540 |
while not current_event.done():
|
541 |
if stop_event.is_set():
|
542 |
-
current_event.cancel()
|
|
|
|
|
|
|
|
|
|
|
|
|
543 |
chatbot.append((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
|
544 |
return gr.update(value=chatbot)
|
545 |
|
@@ -574,7 +607,7 @@ def stop_processing(chatbot):
|
|
574 |
return gr.update(value=chatbot)
|
575 |
|
576 |
# This function is for agent executor invoke with the option of stop
|
577 |
-
def answer_question_thread(user_question, chatbot,
|
578 |
|
579 |
global iterations
|
580 |
iterations = 0
|
@@ -609,18 +642,8 @@ def answer_question_thread(user_question, chatbot, audio=None):
|
|
609 |
|
610 |
while iterations < max_iterations:
|
611 |
|
612 |
-
"""if "send email to" in user_question:
|
613 |
-
email_match = re.search(r"send email to ([\w\.-]+@[\w\.-]+)", user_question)
|
614 |
-
if email_match:
|
615 |
-
user_email = email_match.group(1).strip()
|
616 |
-
user_question = user_question.replace(f"send email to {user_email}", "").strip()
|
617 |
-
user_question = f"{user_question}:{user_email}"
|
618 |
-
"""
|
619 |
-
|
620 |
response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
|
621 |
-
|
622 |
-
print(response)
|
623 |
-
|
624 |
if isinstance(response, dict):
|
625 |
response_text = response.get("output", "")
|
626 |
else:
|
@@ -711,14 +734,6 @@ def answer_question(user_question, chatbot, audio=None):
|
|
711 |
|
712 |
while iterations < max_iterations:
|
713 |
|
714 |
-
"""if "send email to" in user_question:
|
715 |
-
email_match = re.search(r"send email to ([\w\.-]+@[\w\.-]+)", user_question)
|
716 |
-
if email_match:
|
717 |
-
user_email = email_match.group(1).strip()
|
718 |
-
user_question = user_question.replace(f"send email to {user_email}", "").strip()
|
719 |
-
user_question = f"{user_question}:{user_email}"
|
720 |
-
"""
|
721 |
-
|
722 |
response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]})
|
723 |
|
724 |
if isinstance(response, dict):
|
@@ -731,6 +746,8 @@ def answer_question(user_question, chatbot, audio=None):
|
|
731 |
|
732 |
if iterations == max_iterations:
|
733 |
return "The agent could not generate a valid response within the iteration limit."
|
|
|
|
|
734 |
|
735 |
if os.getenv("IMAGE_PATH") in response_text:
|
736 |
# Open the image file
|
@@ -742,7 +759,7 @@ def answer_question(user_question, chatbot, audio=None):
|
|
742 |
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
743 |
|
744 |
img = f'<img src="data:image/png;base64,{img_str}" style="width:450px; height:400px;">'
|
745 |
-
|
746 |
chatbot.append((user_question, img))
|
747 |
|
748 |
email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
|
@@ -778,21 +795,45 @@ def answer_question(user_question, chatbot, audio=None):
|
|
778 |
print(f"Error loading image file: {e}")
|
779 |
chatbot.append((user_question, "Chart generation failed. Please try again."))
|
780 |
return gr.update(value=chatbot)
|
781 |
-
|
782 |
-
|
783 |
-
# return [(user_question,gr.Image("/home/user/app/exports/charts/temp_chart.png"))]
|
784 |
-
# return "/home/user/app/exports/charts/temp_chart.png"
|
785 |
else:
|
786 |
chatbot.append((user_question, response_text))
|
787 |
return gr.update(value=chatbot)
|
788 |
-
|
789 |
-
|
790 |
-
|
791 |
-
def submit_feedback(feedback, chatbot):
|
792 |
gr.Info("Thank you for your feedback.")
|
|
|
|
|
793 |
feedback_response = "User feedback: " + feedback
|
794 |
return chatbot + [(feedback_response, None)], gr.update(visible=False), gr.update(visible=False)
|
795 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
796 |
def handle_dislike(data: gr.LikeData):
|
797 |
if not data.liked:
|
798 |
print("downvote")
|
@@ -806,13 +847,369 @@ def handle_dislike(data: gr.LikeData):
|
|
806 |
def update_message(request: gr.Request):
|
807 |
return f"<h2 style=' font-family: Calibri;'>Welcome, {request.username}</h4>"
|
808 |
|
809 |
-
|
810 |
-
|
811 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
812 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
813 |
|
814 |
-
# CSS for styling the buttons and other elements
|
815 |
css = """
|
|
|
816 |
/* Example of custom button styling */
|
817 |
.gr-button {
|
818 |
background-color: #6366f1; /* Change to your desired button color */
|
@@ -822,7 +1219,6 @@ css = """
|
|
822 |
padding: 10px 20px;
|
823 |
font-size: 12px;
|
824 |
cursor: pointer;
|
825 |
-
|
826 |
}
|
827 |
|
828 |
.gr-button:hover {
|
@@ -837,14 +1233,12 @@ css = """
|
|
837 |
padding: 10px 20px;
|
838 |
font-size: 14px;
|
839 |
cursor: pointer;
|
840 |
-
|
841 |
}
|
842 |
|
843 |
.gr-buttonbig:hover {
|
844 |
background-color: #8a92f7; /* Darker shade on hover */
|
845 |
}
|
846 |
|
847 |
-
|
848 |
/* Customizing the Logout link to be on the right */
|
849 |
.logout-link {
|
850 |
text-align: right;
|
@@ -857,57 +1251,106 @@ css = """
|
|
857 |
text-decoration: none;
|
858 |
font-size: 16px;
|
859 |
}
|
|
|
860 |
.chatbot_gpt {
|
861 |
-
/* width: 800px !important; Adjust width as needed */
|
862 |
height: 600px !important; /* Adjust height as needed */
|
863 |
}
|
|
|
864 |
.logout-link a:hover {
|
865 |
text-decoration: underline; /* Underline on hover */
|
866 |
}
|
|
|
867 |
.message-buttons-right{
|
868 |
display: none !important;
|
869 |
}
|
|
|
870 |
body, .gradio-container {
|
871 |
margin: 0;
|
872 |
padding: 0;
|
873 |
}
|
874 |
-
"""
|
875 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
876 |
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
|
877 |
gr.HTML("<CENTER><B><h1 style='font-size:30px; font-family: Calibri;'>RedMindGPT</h1></B></CENTER>")
|
|
|
|
|
|
|
|
|
878 |
with gr.Row():
|
879 |
m = gr.Markdown()
|
880 |
demo.load(update_message, None, m)
|
881 |
-
|
882 |
-
|
883 |
-
gr.Markdown("<div class='logout-link'><a href='/logout'><b>Logout</b></a></div>")
|
884 |
-
|
885 |
with gr.Row():
|
886 |
-
sample_button = gr.Button("What are the details of ASN24091600002",elem_classes="gr-buttonbig")
|
887 |
-
sample_button1 = gr.Button("What are the active warehouses available",elem_classes="gr-buttonbig")
|
888 |
-
sample_button2 = gr.Button("Explain Pre-Receiving Yard Management",elem_classes="gr-buttonbig")
|
889 |
-
sample_button3 = gr.Button("Can you generate a pie chart with item names and quantities in warehouse WH1000001",elem_classes="gr-buttonbig")
|
890 |
sample_button4 = gr.Button("Analyze item name & quantity for different customers in a stacked bar chart for the warehouse WH1000001 & send email to meetarun@gmail.com", elem_classes="gr-button")
|
891 |
|
|
|
892 |
with gr.Row():
|
893 |
-
chatbot = gr.Chatbot(label="Select any of the questions listed above to experience
|
894 |
|
|
|
895 |
with gr.Row():
|
896 |
-
with gr.Column(scale=
|
897 |
message = gr.Textbox(show_label=False, container=False, placeholder="Please enter your question")
|
898 |
-
|
899 |
with gr.Row():
|
900 |
feedback_textbox = gr.Textbox(visible=False, show_label=False, container=False, placeholder="Please enter your feedback.")
|
901 |
-
submit_feedback_button = gr.Button("Submit Feedback", visible=False,elem_classes="gr-buttonbig")
|
902 |
with gr.Column(scale=1):
|
903 |
with gr.Row():
|
904 |
-
button = gr.Button("Submit", elem_id="submit",elem_classes="gr-buttonbig")
|
905 |
-
# Button to stop the current processing
|
906 |
stop_button = gr.Button("Stop", elem_classes="gr-buttonbig")
|
907 |
-
|
908 |
-
|
909 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
910 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
911 |
button.click(handle_query, [message, chatbot], [chatbot])
|
912 |
message.submit(handle_query, [message, chatbot], [chatbot])
|
913 |
message.submit(lambda x: gr.update(value=""), None, [message], queue=False)
|
@@ -917,14 +1360,15 @@ with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
|
|
917 |
submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot], [chatbot, feedback_textbox, submit_feedback_button])
|
918 |
submit_feedback_button.click(lambda x: gr.update(value=''), [], [feedback_textbox])
|
919 |
|
920 |
-
#sample_button.click(answer_question, [sample_button, chatbot], [chatbot])
|
921 |
sample_button.click(handle_query, [sample_button, chatbot], [chatbot])
|
922 |
sample_button1.click(handle_query, [sample_button1, chatbot], [chatbot])
|
923 |
sample_button2.click(handle_query, [sample_button2, chatbot], [chatbot])
|
924 |
sample_button3.click(handle_query, [sample_button3, chatbot], [chatbot])
|
925 |
sample_button4.click(handle_query, [sample_button4, chatbot], [chatbot])
|
926 |
|
927 |
-
|
928 |
-
|
929 |
-
|
930 |
-
|
|
|
|
|
|
11 |
import shutil
|
12 |
import json
|
13 |
import nltk
|
14 |
+
import mysql.connector
|
15 |
+
import fnmatch
|
16 |
+
# audio related code is not included based on Arun's input
|
17 |
# audio package
|
18 |
+
import speech_recognition as sr
|
19 |
from pydub import AudioSegment
|
20 |
+
from pydub.playback import play
|
21 |
+
# SMTP code is not included since HFSpaces doesn't support it
|
22 |
# email library
|
23 |
+
import smtplib, ssl
|
24 |
from email.mime.multipart import MIMEMultipart
|
25 |
from email.mime.text import MIMEText
|
26 |
from email.mime.base import MIMEBase
|
27 |
+
from email import encoders
|
28 |
# langchain
|
29 |
from langchain_core.prompts import ChatPromptTemplate
|
30 |
from langchain_core.output_parsers import StrOutputParser
|
|
|
36 |
from langchain.agents import create_tool_calling_agent, AgentExecutor, Tool
|
37 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
38 |
from langchain.tools import StructuredTool
|
39 |
+
#from langchain.pydantic_v1 import BaseModel, Field
|
40 |
+
from pydantic import BaseModel, Field
|
41 |
from PyPDF2 import PdfReader
|
42 |
from nltk.tokenize import sent_tokenize
|
43 |
from datetime import datetime
|
44 |
from sqlalchemy import create_engine
|
45 |
from sqlalchemy.sql import text
|
46 |
+
import openai
|
47 |
|
48 |
# pandas
|
49 |
import pandas as pd
|
|
|
68 |
from mailjet_rest import Client
|
69 |
import base64
|
70 |
|
71 |
+
#for PDF form filling
|
72 |
+
from PyPDFForm import FormWrapper
|
73 |
+
|
74 |
+
#Variables Initialization
|
75 |
+
agent_executor = None
|
76 |
+
vector_store1 = None
|
77 |
+
texts1 = None
|
78 |
+
excel_dataframe = None
|
79 |
+
file_extension = None
|
80 |
+
total_rows = ""
|
81 |
+
docstatus = ""
|
82 |
+
sample_table = ""
|
83 |
+
#This is to define the summary of the runtime tool. This summary will be updated in prompt template and description of the new tool
|
84 |
+
run_time_tool_summary=""
|
85 |
+
|
86 |
# Define global variables for managing the thread and current_event
|
87 |
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
|
88 |
+
|
89 |
current_event = None
|
90 |
stop_event = threading.Event()
|
91 |
|
|
|
94 |
os.environ["LANGFUSE_SECRET_KEY"] = os.getenv("LANGFUSE_SECRET_KEY")
|
95 |
os.environ["LANGFUSE_HOST"] = os.getenv("LANGFUSE_HOST")
|
96 |
|
97 |
+
DB_USER = 'u852023448_redmindgpt'
|
98 |
+
DB_PASSWORD = 'redmindGpt@123'
|
99 |
+
DB_HOST = '217.21.88.10'
|
100 |
+
DB_NAME = 'u852023448_redmindgpt'
|
101 |
+
|
102 |
+
|
103 |
langfuse_handler = CallbackHandler()
|
104 |
langfuse_handler.auth_check() # Optional: Checks if the authentication is successful
|
105 |
|
106 |
nltk.download('punkt')
|
107 |
|
108 |
+
open_api_key_token = os.getenv("OPENAI_API_KEY")
|
109 |
|
110 |
os.environ['OPENAI_API_KEY'] = open_api_key_token
|
111 |
pdf_path = "Inbound.pdf"
|
112 |
|
113 |
db_uri = os.getenv("POSTGRESQL_CONNECTION")
|
|
|
114 |
|
115 |
+
# Database setup
|
116 |
db = SQLDatabase.from_uri(db_uri)
|
117 |
|
118 |
user_email = ""
|
|
|
120 |
warehouse_id = ""
|
121 |
# Today's date to be populated in inventory API
|
122 |
inventory_date = datetime.today().strftime('%Y-%m-%d')
|
123 |
+
|
124 |
apis = [
|
125 |
# fetch warehouse ID
|
126 |
{
|
|
|
139 |
llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=300, temperature=0.1)
|
140 |
llm_chart = OpenAI()
|
141 |
|
|
|
142 |
def get_schema(_):
|
143 |
schema_info = db.get_table_info() # This should be a string of your SQL schema
|
144 |
return schema_info
|
|
|
187 |
|
188 |
def get_ASN_data(question):
|
189 |
base_url = os.getenv("ASN_API_URL")
|
190 |
+
print(f"base_url{base_url}")
|
191 |
complete_url = f"{base_url}branchMaster.id=343&transactionUid={question}&userId=164&transactionType=ASN"
|
192 |
try:
|
193 |
response = requests.get(complete_url)
|
194 |
+
print(f"complete_url{complete_url}")
|
195 |
+
print(f"response{response}")
|
196 |
data = response.json()
|
197 |
response.raise_for_status()
|
198 |
|
|
|
275 |
texts = load_and_split_pdf(pdf_path)
|
276 |
vector_store = create_vector_store(texts)
|
277 |
|
278 |
+
def document_data_tool_runtime(question):
|
279 |
+
print(f"Document data runtime tool enter: {question} with {vector_store1}")
|
280 |
+
query_response = query_vector_store(vector_store1, question, config={"callbacks": [langfuse_handler]})
|
281 |
+
return query_response
|
282 |
|
283 |
def document_data_tool(question):
|
284 |
print(f"Document data tool enter: {question}")
|
|
|
289 |
|
290 |
# mailjet API since SMTP is not supported HF spaces
|
291 |
def send_email_with_attachment_mailjet(recipient_email, subject, body, attach_img_base64=None):
|
|
|
|
|
292 |
api_key = os.getenv("MAILJET_API_KEY")
|
293 |
api_secret = os.getenv("MAILJET_API_SECRET")
|
294 |
|
|
|
381 |
def make_api_request(url, params):
|
382 |
"""Generic function to make API GET requests and return JSON data."""
|
383 |
try:
|
|
|
|
|
384 |
response = requests.get(url, params=params)
|
385 |
response.raise_for_status() # Raises an HTTPError if the response was an error
|
386 |
return response.json() # Return the parsed JSON data
|
|
|
406 |
if data:
|
407 |
# Extracting the id for the warehouse with the name "WH"
|
408 |
warehouse_id = next((item['id'] for item in data['result'] if item['wareHouseId'] == warehouse_name), None)
|
409 |
+
|
410 |
+
if (warehouse_id):
|
411 |
+
|
|
|
412 |
# Step 3: Update the placeholder with the actual warehouse_id
|
413 |
for api in apis:
|
414 |
if "warehouseId" in api["params"]:
|
415 |
api["params"]["warehouseId"] = warehouse_id
|
416 |
+
|
|
|
|
|
|
|
417 |
data1 = make_api_request(apis[1]["url"], apis[1]["params"])
|
418 |
if (data1):
|
419 |
headers = ["S.No", "Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name", "Item Code", "Item Name",
|
|
|
441 |
|
442 |
# Convert to pandas DataFrame
|
443 |
df = pd.DataFrame(table_data, columns=headers)
|
444 |
+
|
445 |
+
chart_link = chat_with_llm(df,question)
|
446 |
|
447 |
+
return chart_link
|
|
|
|
|
|
|
|
|
|
|
448 |
else:
|
449 |
return "There are no inventory details for the warehouse you have given."
|
450 |
else:
|
451 |
return "Please provide a warehouse name available in the database."
|
452 |
|
453 |
+
def chat_with_llm(df,question):
|
454 |
+
sdf = SmartDataframe(df, config={"llm": llm_chart})
|
455 |
+
llm_response = sdf.chat(question)
|
456 |
+
return llm_response
|
457 |
|
458 |
+
def bind_llm(llm, tools,prompt_template):
|
459 |
+
llm = llm.bind()
|
460 |
+
agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
|
461 |
+
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
|
462 |
+
return agent_executor
|
463 |
|
464 |
# Define input and output models using Pydantic
|
465 |
class QueryInput(BaseModel):
|
|
|
476 |
|
477 |
# Wrap the function with StructuredTool for better parameter handling
|
478 |
tools = [
|
479 |
+
|
480 |
StructuredTool(
|
481 |
func=get_ASN_data,
|
482 |
name="APIData",
|
|
|
517 |
]
|
518 |
|
519 |
prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
|
520 |
+
|
521 |
For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
|
522 |
You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
|
523 |
For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
|
|
|
525 |
{{agent_scratchpad}}
|
526 |
Here is the information you need to process:
|
527 |
Question: {{input}}"""
|
528 |
+
agent_executor = bind_llm(llm,tools,prompt_template)
|
|
|
|
|
|
|
|
|
529 |
|
530 |
def ensure_temp_chart_dir():
|
531 |
temp_chart_dir = os.getenv("IMAGE_MAIN_URL")
|
532 |
if not os.path.exists(temp_chart_dir):
|
533 |
os.makedirs(temp_chart_dir)
|
534 |
|
|
|
535 |
def clean_gradio_tmp_dir():
|
536 |
tmp_dir = os.getenv("IMAGE_GRADIO_PATH")
|
537 |
if os.path.exists(tmp_dir):
|
|
|
556 |
# Clear previous stop event and current_event
|
557 |
stop_event.clear()
|
558 |
|
559 |
+
if current_event and not current_event.done():
|
560 |
+
chatbot.append(("","A query is already being processed. Please stop it before starting a new one."))
|
561 |
+
return gr.update(value=chatbot)
|
562 |
|
563 |
# Start the processing in a new thread
|
564 |
current_event = executor.submit(answer_question_thread, user_question, chatbot)
|
|
|
566 |
# Periodically check if current_event is done
|
567 |
while not current_event.done():
|
568 |
if stop_event.is_set():
|
569 |
+
#current_event.task.cancel() # Attempt to cancel the current_event
|
570 |
+
current_event.set_result((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
|
571 |
+
current_event.cancel() # Attempt to cancel the current_event
|
572 |
+
executor.shutdown(wait=False) # Shutdown the executor
|
573 |
+
print("Current event cancelled")
|
574 |
+
print(current_event.cancelled())
|
575 |
+
|
576 |
chatbot.append((user_question, "Sorry, we encountered an error while processing your request. Please try after some time."))
|
577 |
return gr.update(value=chatbot)
|
578 |
|
|
|
607 |
return gr.update(value=chatbot)
|
608 |
|
609 |
# This function is for agent executor invoke with the option of stop
|
610 |
+
def answer_question_thread(user_question, chatbot,audio=None):
|
611 |
|
612 |
global iterations
|
613 |
iterations = 0
|
|
|
642 |
|
643 |
while iterations < max_iterations:
|
644 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
645 |
response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]}, early_stopping_method="generate")
|
646 |
+
|
|
|
|
|
647 |
if isinstance(response, dict):
|
648 |
response_text = response.get("output", "")
|
649 |
else:
|
|
|
734 |
|
735 |
while iterations < max_iterations:
|
736 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
737 |
response = agent_executor.invoke({"input": user_question}, config={"callbacks": [langfuse_handler]})
|
738 |
|
739 |
if isinstance(response, dict):
|
|
|
746 |
|
747 |
if iterations == max_iterations:
|
748 |
return "The agent could not generate a valid response within the iteration limit."
|
749 |
+
|
750 |
+
|
751 |
|
752 |
if os.getenv("IMAGE_PATH") in response_text:
|
753 |
# Open the image file
|
|
|
759 |
img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
|
760 |
|
761 |
img = f'<img src="data:image/png;base64,{img_str}" style="width:450px; height:400px;">'
|
762 |
+
|
763 |
chatbot.append((user_question, img))
|
764 |
|
765 |
email_pattern = r'[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}'
|
|
|
795 |
print(f"Error loading image file: {e}")
|
796 |
chatbot.append((user_question, "Chart generation failed. Please try again."))
|
797 |
return gr.update(value=chatbot)
|
798 |
+
|
|
|
|
|
|
|
799 |
else:
|
800 |
chatbot.append((user_question, response_text))
|
801 |
return gr.update(value=chatbot)
|
802 |
+
|
803 |
+
|
804 |
+
def submit_feedback(feedback, chatbot, request: gr.Request):
    """Persist the user's feedback for the last chat turn and hide the feedback widgets.

    Args:
        feedback: Free-text feedback entered by the user.
        chatbot: Current chat history as a list of (question, answer) pairs.
        request: Gradio request object; supplies the logged-in username.

    Returns:
        Updated chat history plus two visibility updates that hide the
        feedback textbox and its submit button.
    """
    gr.Info("Thank you for your feedback.")
    # The most recent turn holds the question/answer the feedback refers to.
    last_question, last_answer = chatbot[-1]
    save_feedback(request.username, last_question, last_answer, feedback)
    updated_history = chatbot + [("User feedback: " + feedback, None)]
    return updated_history, gr.update(visible=False), gr.update(visible=False)
|
810 |
|
811 |
+
|
812 |
+
# Function to connect to MySQL database
|
813 |
+
def connect_to_db():
    """Open and return a new connection to the application's MySQL database."""
    connection_params = {
        "host": DB_HOST,
        "user": DB_USER,
        "password": DB_PASSWORD,
        "database": DB_NAME,
    }
    return mysql.connector.connect(**connection_params)
|
820 |
+
|
821 |
+
# Function to save feedback to the database
|
822 |
+
def save_feedback(username, user_question, user_response, feedback):
    """Insert one feedback record into the user_feedback table.

    Args:
        username: Login name of the user giving feedback.
        user_question: The question the user asked.
        user_response: The answer the bot produced.
        feedback: Free-text feedback supplied by the user.

    Errors are logged, never raised, so feedback failures don't break the UI.
    """
    # Initialise up front: if connect_to_db() itself raises, the finally
    # block previously hit a NameError on the unbound `cursor`/`conn` names.
    conn = None
    cursor = None
    try:
        conn = connect_to_db()
        cursor = conn.cursor()
        # Parameterized query — values are never interpolated into the SQL string.
        query = "INSERT INTO user_feedback (username, question, response, feedback) VALUES (%s, %s, %s, %s)"
        cursor.execute(query, (username, user_question, user_response, feedback))
        conn.commit()
    except mysql.connector.Error as err:
        print(f"Error: {err}")
    finally:
        if cursor:
            cursor.close()
        if conn:
            conn.close()
|
836 |
+
|
837 |
def handle_dislike(data: gr.LikeData):
|
838 |
if not data.liked:
|
839 |
print("downvote")
|
|
|
847 |
def update_message(request: gr.Request):
    """Return a welcome banner (HTML) for the logged-in user.

    Bug fix: the heading opened with <h2> but closed with </h4>; the closing
    tag is corrected so the markup is well-formed.
    """
    return f"<h2 style=' font-family: Calibri;'>Welcome, {request.username}</h2>"
|
849 |
|
850 |
+
# Function to generate a 50-word summary of the newly uploaded doc using OpenAI
|
851 |
+
def generate_summary(text):
    """Ask OpenAI for a concise title and ~50-word summary of *text*.

    Args:
        text: Document content (or extracted chunks) to summarise.

    Returns:
        (title, summary) tuple of strings parsed from the model reply.
        Either element may be an empty string if the reply lacks the
        expected "**Title:**" / "**Summary:**" markers.
    """
    prompt = (
        "You are an AI that helps with document analysis. Please provide a concise title and a summary of the following document. "
        "The summary should be about 50 words and include key details that can help answer questions accurately:\n\n"
        f"{text}\n\nTitle : Summary"
    )
    # Call the OpenAI API to generate a summary
    response = openai.chat.completions.create(
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        model="gpt-4o-mini",
    )
    response_content = response.choices[0].message.content
    # Drop blank lines, then scan for the labelled parts instead of the old
    # hard-coded lines[0]/lines[2] lookup, which raised IndexError whenever
    # the model returned fewer than three lines.
    lines = [line for line in response_content.split("\n") if line.strip()]

    title = lines[0].split("**Title:**")[-1].strip() if lines else ""
    summary = ""
    for line in lines[1:]:
        if "**Summary:**" in line:
            summary = line.split("**Summary:**")[-1].strip()
            break
    else:
        # No explicit marker: fall back to the last non-empty line.
        if len(lines) > 1:
            summary = lines[-1].strip()

    return title, summary
|
879 |
+
#function to handle file upload decide whether excel or doc is uploaded and respective tool will be created with appropriate prompts at runtime
|
880 |
+
def upload_file(filepath):
    """Handle an uploaded file and prepare it for natural-language Q&A.

    PDF uploads are split, embedded into a vector store, and summarised;
    Excel uploads are turned into a data-description prompt.

    Args:
        filepath: Path (or Gradio file object for .xlsx) of the upload.

    Returns:
        A (title, summary_or_prompt) pair. Always two values: the Gradio
        upload handler is wired to exactly two output components, so the
        old third return value (file_extension) on the PDF branch caused a
        mismatch; file_extension is still published via the global.
    """
    global vector_store1, file_extension

    # Get the file extension
    _, file_extension = os.path.splitext(filepath)

    if file_extension == ".pdf":
        texts1 = load_and_split_pdf(filepath)
        vector_store1 = create_vector_store(texts1)
        # Generate a 50-word summary from the extracted text
        title, summary = generate_summary(texts1)
        return title, summary
    elif file_extension == ".xlsx":
        title, prompt = process_excel(filepath)
        return title, prompt
    else:
        # Previously fell through and returned None, which broke the UI update.
        return "Unsupported file", f"The file type {file_extension} is not supported. Please upload a .pdf or .xlsx file."
|
896 |
+
|
897 |
+
def generate_example_questions(sheet_name, column_headers):
    """Build example natural-language questions for an Excel sheet.

    Args:
        sheet_name (str): The name of the Excel sheet.
        column_headers (list): Column headers found in that sheet.

    Returns:
        list: Suggested questions derived from which headers are present.
    """
    header_set = set(column_headers)
    questions = []

    # Each recognised column family contributes one canned question,
    # in a fixed order so the output is deterministic.
    if {"Product Name", "Product"} & header_set:
        questions.append(f"What is the total sales for a specific product in {sheet_name}?")

    if {"Sales Amount", "Amount"} & header_set:
        questions.append(f"What is the total sales amount for a specific region in {sheet_name}?")

    if "Region" in header_set:
        questions.append(f"Which region had the highest sales in {sheet_name}?")

    if "Date" in header_set:
        questions.append(f"What were the total sales during a specific month in {sheet_name}?")

    if "Price" in header_set:
        questions.append(f"What is the price of a specific product in {sheet_name}?")

    # Any header starting with "Employee" triggers the broker-details question.
    if any(fnmatch.fnmatch(header, "Employee*") for header in column_headers):
        questions.append("What are the details of the distinct broker names?")

    return questions
|
930 |
+
|
931 |
+
def generate_prompt_from_excel_file(df_dict):
    """
    Generates a prompt from an Excel file containing multiple sheets.

    Args:
        df_dict (dict): Mapping of sheet name -> pandas.DataFrame, as
            returned by pandas.read_excel(..., sheet_name=None).

    Returns:
        tuple: ("Excel data", prompt) where prompt lists each sheet's column
        headers plus the query/output conventions the agent must follow.
    """

    # Initialize prompt with basic structure
    prompt = "You have been provided with an Excel file containing data in several sheets.\n"

    # Loop through each sheet to extract column headers and sample data
    for sheet_name, sheet_df in df_dict.items():
        # Extract column headers
        column_headers = list(sheet_df.columns)

        # Get a sample of the data (first few rows).
        # NOTE(review): sample_data and example_questions are computed but
        # currently unused — their prompt consumers are commented out below.
        sample_data = sheet_df.head(3).to_string(index=False)

        # Add sheet details to the prompt
        prompt += f"For the sheet '{sheet_name}', the column headers are:"
        prompt += f"{', '.join(column_headers)}\n\n"
        #prompt += f"Example data from sheet '{sheet_name}':\n"
        #prompt += f"{sample_data}\n\n"

        # Generate example natural language questions based on columns
        example_questions = generate_example_questions(sheet_name, column_headers)
        #prompt += "### Example Questions:\n"
        #for question in example_questions:
        #    prompt += f"- {question}\n"
        #prompt += "\n"

    # Finalize the prompt with the query/output conventions.
    # NOTE(review): docstatus and sample_table are not defined in this
    # function — presumably module-level globals. Confirm they exist before
    # this runs, otherwise the f-strings below raise NameError.
    prompt += f"- Query: A natural language question (e.g., List all the employees with broker name ADP or Alerus). The question should be sent as 'What are the employee details with broker name ADP or Alerus :'."
    prompt += f"""Output : {docstatus}. Here is the sample table:
    {sample_table}.
    """

    prompt += f"- Query: A natural language question with request to create LOA document (e.g., can you create LOA document for all the employees with broker name ADP or Alerus). The question should be sent as 'What are the employee details with broker name ADP or Alerus : LOA document'."
    prompt += f"""Output: {docstatus}. Here is the sample table:
    {sample_table}.
    If there is any error, please display the message returned by the function as response. """

    return "Excel data", prompt
|
981 |
|
982 |
+
# Function to handle "Add to RedMindGPT" button click
|
983 |
+
def add_to_redmindgpt(title, summary):
    """
    Adds a document or Excel file to the RedmindGPT system and configures the appropriate runtime tool for handling related queries.

    Parameters:
        title (str): The title of the document or Excel file.
        summary (str): A brief summary of the document or Excel file.

    Returns:
        str: A message indicating whether the file has been added successfully.

    Behavior:
        - If the file extension is ".pdf", it sets up a runtime tool for handling document-related queries.
        - If the file extension is ".xlsx", it sets up a runtime tool for handling Excel data-related queries.
        - Configures the prompt template for the agent executor based on the file type.
        - Adds the configured runtime tool to the list of tools used by the agent executor.
    """

    # NOTE(review): relies on the module-level file_extension set by
    # upload_file(); calling this before an upload fails — confirm the UI
    # guarantees the ordering (the Add button is hidden until upload).
    global agent_executor, file_extension

    if file_extension == ".pdf":
        run_time_tool_summary = f"For {title} document related questions, Please refer runtimeDocumentData tool. {summary}. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points."

        # Tool that answers questions against the uploaded PDF's vector store.
        run_time_tool = StructuredTool(
            func=document_data_tool_runtime,
            name="runtimeDocumentData",
            args_schema=QueryInput,
            output_schema=QueryOutput,
            description=f"You are an AI assistant trained to help with the questions based on the uploaded document {title}. {summary}. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points."
        )

        # Add the new tool to the beginning so it wins over existing tools.
        tools.insert(0, run_time_tool)

        # Rebuild the agent's system prompt so it mentions the new tool.
        prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
        {run_time_tool_summary}
        For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
        You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
        For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
        For datavisualization, user will ask for inventory report of a particular warehouse. Your job is to return the image path to chat interface and display the image as output.

        {{agent_scratchpad}}
        Here is the information you need to process:
        Question: {{input}}"""
        agent_executor = bind_llm(llm,tools,prompt_template)
        return f"File has been added successfully."
    elif file_extension == ".xlsx":
        run_time_excel_tool_summary = f"For {title} related questions, Please refer runtimeExcelData tool. {summary}. Display the response only in the format as mentioned in the tool description. "

        # Tool that answers questions against the uploaded Excel data.
        # NOTE(review): docstatus, sample_table and total_rows interpolated in
        # the description are presumably module-level globals — confirm they
        # are defined before an .xlsx upload, otherwise this raises NameError.
        run_time_excel_tool = StructuredTool(
            func=chat_with_excel_data_dataframe,
            name="runtimeExcelData",
            args_schema=QueryInput,
            output_schema=QueryOutput,
            description=f"""You are an AI assistant trained to handle Excel data and return meaningful insights. If user query is given with an option of generating the document with the result set dataframe, pass two inputs to the tool. First input is the user query and the second input will be the phrase "create document". display the response only in the below format.
            {docstatus}. Here is the sample data:
            {sample_table}.
            Please provide the total rows count from the {total_rows} values returned by the function and not the count of sample table rows. If there is any error, please display the message returned by the function as response. """
        )

        # Add the new tool to the beginning so it wins over existing tools.
        tools.insert(0, run_time_excel_tool)

        # Rebuild the agent's system prompt so it mentions the new tool.
        prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Please give the output response in an user friendly way and remove "**" from the response. For example, document related queries can be answered in a clear and concise way with numbering and not as a paragraph. Database related queries should be answered with proper indentation and use numbering for the rows. ASN id related queries should be answered with proper indentation and use numbering for the rows.
        {run_time_excel_tool_summary}
        For ASN id related questions, if the user specifies an ASN id, provide the information from the api tool. Pass only the id as input to the tool. Do not pass the entire question as input to the tool. If the details are not found, say it in a clear and concise way.
        You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers various processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Please provide a complete and concise response within 200 words and Ensure that the response is not truncated and covers the essential points. When answering, focus on providing actionable insights and clear explanations related to the specific query. Please remove "**" from the response.
        For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, wms_warehouse_master.
        For datavisualization, user will ask for inventory report of a particular warehouse. Your job is to return the image path to chat interface and display the image as output.

        {{agent_scratchpad}}
        Here is the information you need to process:
        Question: {{input}}"""
        agent_executor = bind_llm(llm,tools,prompt_template)
        return f"File has been added successfully."
|
1055 |
+
|
1056 |
+
def process_excel(file):
    """Read an uploaded Excel file and build its descriptive prompt.

    Args:
        file: Gradio file object for the upload (or None).

    Returns:
        A (title, prompt) pair; when no file is supplied, a user-facing
        error message pair instead.
    """
    global excel_dataframe

    # Guard clause: nothing was uploaded.
    if file is None:
        return "Excel file", "Your excel does not have values. Please upload a different file."

    # First pass reads every sheet so the prompt can describe all of them.
    excel_dataframe = pd.read_excel(file.name, sheet_name=None)

    # Derive the title and the agent prompt from the per-sheet structure.
    title, prompt = generate_prompt_from_excel_file(excel_dataframe)

    # Second pass re-reads only the default (first) sheet: downstream chat
    # helpers work on a single DataFrame, not a dict of sheets.
    excel_dataframe = pd.read_excel(file.name)

    return title, prompt
|
1070 |
+
|
1071 |
+
def chat_with_excel_data(question):
    """Run *question* against the loaded Excel DataFrame via the LLM helper."""
    global excel_dataframe
    answer = chat_with_llm(excel_dataframe, question)
    print(answer)
    return answer
|
1076 |
+
|
1077 |
+
def chat_with_excel_data_dataframe(question):
    """Answer an Excel-data question and normalise the result for display.

    If the LLM returns plain text of the form "...: a, b, c" instead of a
    DataFrame, the comma-separated tail is converted into a single-column
    DataFrame so handle_large_dataset() can render it uniformly.

    Args:
        question: The user's natural-language query; mentioning "LOA"
            requests per-row PDF document generation.

    Returns:
        Whatever handle_large_dataset() returns: a (sample_table, docstatus)
        pair for small result sets, or an error string for oversized ones.

    Cleanup: removed the unreachable commented-out code that followed the
    return statement, and replaced `isinstance(...) == False` with `not`.
    """
    is_dataframe = True
    print(f"question for excel data frame : {question}")

    # An "LOA" request means the user also wants per-row PDF documents.
    create_document = "LOA" in question
    print(f"create document : {create_document}")

    response_dataframe = chat_with_excel_data(question)
    if not isinstance(response_dataframe, pd.DataFrame):
        print("The result is not a DataFrame.")
        if ":" in response_dataframe:
            is_dataframe = False
            # Everything after the first colon is a comma-separated list of values.
            names_part = response_dataframe.split(":", 1)[1]
            names = names_part.split(",")
            response_dataframe = pd.DataFrame(names, columns=["Result"])

    # Handle (potentially large) dataset: preview table + download link.
    return handle_large_dataset(response_dataframe, create_document, is_dataframe)
|
1120 |
+
def save_file_to_hostinger(save_file_path):
    """Upload *save_file_path* to the Hostinger FTP server so users can download it.

    Args:
        save_file_path: Local path of the file to upload. The remote name is
            fixed to /RedMindGPT/output.xlsx, matching the download links
            built elsewhere in this app.
    """
    from ftplib import FTP

    # SECURITY: credentials are hard-coded and visible to anyone with access
    # to this source (and to the public Space). Move them to environment
    # variables / HF secrets.
    ftp_host = 'ftp.redmindtechnologies.com'
    ftp_user = 'u852023448.redmindGpt'
    ftp_pass = 'RedMind@505'
    remote_file_path = '/RedMindGPT/output.xlsx'  # destination path on the server

    # Context managers guarantee the connection and the local file are closed
    # even if the transfer fails (the old code leaked the connection on error).
    with FTP(ftp_host) as ftp:
        ftp.login(ftp_user, ftp_pass)
        with open(save_file_path, 'rb') as file:
            ftp.storbinary(f'STOR {remote_file_path}', file)

    print(f'File {save_file_path} uploaded to {remote_file_path} on server.')
|
1140 |
+
|
1141 |
+
def handle_large_dataset(df, create_document,isDataFrame):
    """Format a query-result DataFrame for chat display and publish the full data.

    Args:
        df: Result set to present. Assumed to be a pandas DataFrame by the
            time it reaches here (the caller wraps plain-text results).
        create_document: When True, generate one LOA PDF per row of df.
            Assumes 'Account Name' and 'Account ID' columns exist —
            TODO confirm the Excel schema always provides them.
        isDataFrame: True when the result came back as a real DataFrame;
            controls how the serial-number column is sized.

    Returns:
        (sample_table, docstatus) for result sets under 4000 rows — a
        markdown preview plus a status string with the download link —
        otherwise a single error string. NOTE(review): the two branches
        return different shapes; callers must handle both.
    """

    total_rows = len(df)
    print(f"Total rows: {total_rows}")
    # The download link always points at the fixed Hostinger location that
    # save_file_to_hostinger() uploads to below.
    docstatus = f"Download the complete dataset <a href='https://redmindtechnologies.com/RedMindGPT/output.xlsx' download> here.</a>.There are total of {total_rows} rows."
    if total_rows < 4000:

        # Build a small preview: drop the original first column when there is
        # more than one (keep 3 rows); otherwise keep the lone column (20 rows).
        if len(df.columns) > 1:
            # Skipping the original first column
            limited_data = df.head(3)
            limited_data_without_first_column = limited_data.iloc[:, 1:]
        else:
            limited_data = df.head(20)
            limited_data_without_first_column = limited_data
        # Add SNo (serial number) as the first column, starting from 1.
        # NOTE(review): inserting into a slice of df can trigger pandas'
        # SettingWithCopyWarning — consider .copy() before insert.
        if isDataFrame :

            limited_data_without_first_column.insert(0, 'SNo', range(1, len(limited_data_without_first_column) + 1))
        else:

            limited_data_without_first_column.insert(0, 'SNo', range(1, len(limited_data) + 1))
        # Save the full dataset and upload it so the download link above works.
        df.to_excel('output_data.xlsx', index=False)
        save_file_to_hostinger('output_data.xlsx')

        # Markdown preview table shown in the chat window.
        sample_table = limited_data_without_first_column.to_markdown()
        if create_document:
            # Generate one filled LOA PDF per result row.
            for index, row in df.iterrows():
                # Create a PDF for each row
                create_pdf(row['Account Name'], row['Account ID'])
            create_document = False
            docstatus += f" {total_rows} documents are created successfully."
        print(sample_table)

        return sample_table, docstatus

    else:
        return "Your query returns a large dataset which is not supported in the current version. Please try a different query."
|
1195 |
+
def create_pdf(name, id):
    """Fill the Goldman LOA PDF template for one account and save it.

    Args:
        name: Account holder name; written into the form as both the account
            title and the printed name, and used as the output file name.
        id: Account number written into the form. (Parameter name kept for
            interface compatibility even though it shadows the builtin.)

    Returns:
        A confirmation message containing the created file's path.
    """
    filled = FormWrapper("Goldman_LOA - Gold.pdf").fill(
        {
            "Title of Account": name,
            "Account Number": id,
            "Print Name and Title": name
        },
    )
    # Build the output path portably: the old hard-coded "documents\\{name}.pdf"
    # only worked on Windows (on Linux it creates an oddly named file in the
    # CWD), and the folder was never guaranteed to exist.
    os.makedirs("documents", exist_ok=True)
    output_file_name = os.path.join("documents", f"{name}.pdf")
    with open(output_file_name, "wb+") as output:
        output.write(filled.read())
    return f"{output_file_name} is created successfully."
|
1209 |
+
|
1210 |
|
|
|
1211 |
css = """
|
1212 |
+
|
1213 |
/* Example of custom button styling */
|
1214 |
.gr-button {
|
1215 |
background-color: #6366f1; /* Change to your desired button color */
|
|
|
1219 |
padding: 10px 20px;
|
1220 |
font-size: 12px;
|
1221 |
cursor: pointer;
|
|
|
1222 |
}
|
1223 |
|
1224 |
.gr-button:hover {
|
|
|
1233 |
padding: 10px 20px;
|
1234 |
font-size: 14px;
|
1235 |
cursor: pointer;
|
|
|
1236 |
}
|
1237 |
|
1238 |
.gr-buttonbig:hover {
|
1239 |
background-color: #8a92f7; /* Darker shade on hover */
|
1240 |
}
|
1241 |
|
|
|
1242 |
/* Customizing the Logout link to be on the right */
|
1243 |
.logout-link {
|
1244 |
text-align: right;
|
|
|
1251 |
text-decoration: none;
|
1252 |
font-size: 16px;
|
1253 |
}
|
1254 |
+
|
1255 |
.chatbot_gpt {
|
|
|
1256 |
height: 600px !important; /* Adjust height as needed */
|
1257 |
}
|
1258 |
+
|
1259 |
.logout-link a:hover {
|
1260 |
text-decoration: underline; /* Underline on hover */
|
1261 |
}
|
1262 |
+
|
1263 |
.message-buttons-right{
|
1264 |
display: none !important;
|
1265 |
}
|
1266 |
+
|
1267 |
body, .gradio-container {
|
1268 |
margin: 0;
|
1269 |
padding: 0;
|
1270 |
}
|
|
|
1271 |
|
1272 |
+
/* Styling the tab header with a blue background */
|
1273 |
+
.gr-tab-header {
|
1274 |
+
background-color: #4A90E2; /* Blue background for the tab header */
|
1275 |
+
padding: 10px;
|
1276 |
+
border-radius: 8px;
|
1277 |
+
color: white;
|
1278 |
+
font-size: 16px;
|
1279 |
+
}
|
1280 |
+
|
1281 |
+
/* Styling the selected tab text color to be green */
|
1282 |
+
.gr-tab-header .gr-tab-active {
|
1283 |
+
color: green; /* Change selected tab text to green */
|
1284 |
+
}
|
1285 |
+
|
1286 |
+
/* Keep non-selected tab text color white */
|
1287 |
+
.gr-tab-header .gr-tab {
|
1288 |
+
color: white;
|
1289 |
+
}
|
1290 |
+
|
1291 |
+
/* Custom CSS for reducing the size of the video element */
|
1292 |
+
.video-player {
|
1293 |
+
width: 500px; /* Set a custom width for the video */
|
1294 |
+
height: 350px; /* Set a custom height for the video */
|
1295 |
+
margin: 0 auto; /* Center the video horizontally */
|
1296 |
+
}
|
1297 |
+
"""
|
1298 |
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
|
1299 |
gr.HTML("<CENTER><B><h1 style='font-size:30px; font-family: Calibri;'>RedMindGPT</h1></B></CENTER>")
|
1300 |
+
# Logout link styled as text link in the right corner
|
1301 |
+
gr.Markdown("<div class='logout-link'><a href='/logout'><b>Logout</b></a></div>")
|
1302 |
+
|
1303 |
+
# Unified RedMindGPT Interface
|
1304 |
with gr.Row():
|
1305 |
m = gr.Markdown()
|
1306 |
demo.load(update_message, None, m)
|
1307 |
+
|
1308 |
+
# Buttons for sample queries
|
|
|
|
|
1309 |
with gr.Row():
|
1310 |
+
sample_button = gr.Button("What are the details of ASN24091600002", elem_classes="gr-buttonbig")
|
1311 |
+
sample_button1 = gr.Button("What are the active warehouses available", elem_classes="gr-buttonbig")
|
1312 |
+
sample_button2 = gr.Button("Explain Pre-Receiving Yard Management", elem_classes="gr-buttonbig")
|
1313 |
+
sample_button3 = gr.Button("Can you generate a pie chart with item names and quantities in warehouse WH1000001", elem_classes="gr-buttonbig")
|
1314 |
sample_button4 = gr.Button("Analyze item name & quantity for different customers in a stacked bar chart for the warehouse WH1000001 & send email to meetarun@gmail.com", elem_classes="gr-button")
|
1315 |
|
1316 |
+
# Chatbot component
|
1317 |
with gr.Row():
|
1318 |
+
chatbot = gr.Chatbot(label="Select any of the questions listed above to experience RedMindGPT in action.", elem_classes="chatbot_gpt")
|
1319 |
|
1320 |
+
# Textbox for user questions
|
1321 |
with gr.Row():
|
1322 |
+
with gr.Column(scale=1):
|
1323 |
message = gr.Textbox(show_label=False, container=False, placeholder="Please enter your question")
|
1324 |
+
|
1325 |
with gr.Row():
|
1326 |
feedback_textbox = gr.Textbox(visible=False, show_label=False, container=False, placeholder="Please enter your feedback.")
|
1327 |
+
submit_feedback_button = gr.Button("Submit Feedback", visible=False, elem_classes="gr-buttonbig")
|
1328 |
with gr.Column(scale=1):
|
1329 |
with gr.Row():
|
1330 |
+
button = gr.Button("Submit", elem_id="submit", elem_classes="gr-buttonbig")
|
|
|
1331 |
stop_button = gr.Button("Stop", elem_classes="gr-buttonbig")
|
1332 |
+
# Rearranged to place Upload Doc and Upload Excel in the same row
|
1333 |
+
with gr.Row():
|
1334 |
+
with gr.Column(scale=1):
|
1335 |
+
# File Upload Section
|
1336 |
+
gr.Markdown("**Add a document or Excel for natural language interaction.**")
|
1337 |
+
with gr.Column(scale=1):
|
1338 |
+
u = gr.UploadButton("Upload a doc/excel", file_count="single", elem_classes="gr-buttonbig")
|
1339 |
+
#excel_file = gr.UploadButton("Upload an excel", file_count="single", elem_classes="gr-buttonbig", file_types=[".xlsx", ".xls"])
|
1340 |
+
with gr.Column(scale=1):
|
1341 |
+
add_button = gr.Button("Add to RedMindGPT", elem_classes="gr-buttonbig", visible=False)
|
1342 |
+
with gr.Row():
|
1343 |
+
title_textbox = gr.Textbox(label="Title", visible=False)
|
1344 |
+
summary_textarea = gr.Textbox(label="Summary", lines=5, visible=False)
|
1345 |
+
|
1346 |
|
1347 |
+
output_message = gr.Markdown() # Markdown to display output message
|
1348 |
+
success_message = gr.Markdown() # Placeholder for messages
|
1349 |
+
|
1350 |
+
|
1351 |
+
# Moved function calling lines to the end
|
1352 |
+
stop_button.click(stop_processing, [chatbot], [chatbot])
|
1353 |
+
|
1354 |
button.click(handle_query, [message, chatbot], [chatbot])
|
1355 |
message.submit(handle_query, [message, chatbot], [chatbot])
|
1356 |
message.submit(lambda x: gr.update(value=""), None, [message], queue=False)
|
|
|
1360 |
submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot], [chatbot, feedback_textbox, submit_feedback_button])
|
1361 |
submit_feedback_button.click(lambda x: gr.update(value=''), [], [feedback_textbox])
|
1362 |
|
|
|
1363 |
sample_button.click(handle_query, [sample_button, chatbot], [chatbot])
|
1364 |
sample_button1.click(handle_query, [sample_button1, chatbot], [chatbot])
|
1365 |
sample_button2.click(handle_query, [sample_button2, chatbot], [chatbot])
|
1366 |
sample_button3.click(handle_query, [sample_button3, chatbot], [chatbot])
|
1367 |
sample_button4.click(handle_query, [sample_button4, chatbot], [chatbot])
|
1368 |
|
1369 |
+
u.upload(upload_file, u, [title_textbox, summary_textarea])
|
1370 |
+
u.upload(lambda _: (gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)), None, [title_textbox, summary_textarea, add_button])
|
1371 |
+
add_button.click(add_to_redmindgpt, [title_textbox, summary_textarea], output_message)
|
1372 |
+
add_button.click(lambda _: (gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)), None, [title_textbox, summary_textarea, add_button])
|
1373 |
+
|
1374 |
+
demo.launch(auth=[("lakshmi", "redmind"), ("arun", "redmind"), ("NewageGlobal", "Newage123$")], auth_message="RedMindGPT", inline=False)
|