Upload 3 files
- Inbound.pdf +0 -0
- app.py +460 -0
- requirements.txt +15 -0
Inbound.pdf
ADDED
Binary file (162 kB).
app.py
ADDED
@@ -0,0 +1,460 @@
# -*- coding: utf-8 -*-
"""Chat_with_agent_v4.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1T5Buj_yHaAnfoO__2-gCFDSvBVheiHrF
"""

from PIL import Image
import base64
from io import BytesIO
import os
import requests
import gradio as gr
#import nltk
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableSequence, RunnableLambda
from langchain_openai import ChatOpenAI
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.utilities import SQLDatabase
from langchain.agents import create_tool_calling_agent, AgentExecutor, Tool
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import StructuredTool
from langchain.pydantic_v1 import BaseModel, Field
from PyPDF2 import PdfReader
from nltk.tokenize import sent_tokenize
from sqlalchemy import create_engine
from sqlalchemy.sql import text
import json
import nltk

nltk.download('punkt')

open_api_key_token = os.environ['OPEN_AI_API']
os.environ['OPENAI_API_KEY'] = open_api_key_token

# Database setup
db_uri = 'postgresql+psycopg2://postgres:postpass@193.203.162.39:5432/warehouseAi'
db = SQLDatabase.from_uri(db_uri)

# LLM setup
#llm = ChatOpenAI(model="gpt-3.5-turbo-0125", max_tokens=150, temperature=0.1)
llm = ChatOpenAI(model="gpt-4o-mini", max_tokens=250, temperature=0.1)

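# Overview of the sections that follow: a text-to-SQL chain over the Postgres schema, an ASN lookup
# against the WMS REST API, FAISS retrieval over the Inbound.pdf document, and a pandasai-based
# inventory chart tool. The four are wrapped as StructuredTools, handed to a tool-calling agent,
# and exposed through a Gradio chat interface.
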
def get_schema(_):
    schema_info = db.get_table_info()  # This should be a string of your SQL schema
    return schema_info


def generate_sql_query(question):
    schema = get_schema(None)
    template_query_generation = """
    Schema: {schema}
    Question: {question}
    Provide a SQL query to answer the above question using the exact field names and table names specified in the schema.
    SQL Query (Please provide only the SQL statement without explanations or formatting):
    """
    prompt_query_generation = ChatPromptTemplate.from_template(template_query_generation)
    schema_and_question = RunnableLambda(lambda _: {'schema': schema, 'question': question})
    sql_chain = RunnableSequence(
        schema_and_question,
        prompt_query_generation,
        llm.bind(stop=["SQL Query End"]),  # Adjust the stop sequence to your needs
        StrOutputParser()
    )
    sql_query = sql_chain.invoke({})
    return sql_query.strip()

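# Illustrative use only (not called at import time): generate_sql_query("How many warehouses are there?")
# would return a plain SQL string such as "SELECT COUNT(*) FROM wms_warehouse_master;", possibly wrapped
# in markdown ```sql fences, which run_query() below strips before executing it against the database.
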
def run_query(query):
    # Clean the query by removing markdown fences and trimming whitespace
    clean_query = query.replace("```sql", "").replace("```", "").strip()
    try:
        result = db.run(clean_query)
        return result
    except Exception as e:
        print(f"Error executing query: {e}")
        return None


# Database query tool: generate a SQL statement for the question, then execute it
def database_tool(question):
    sql_query = generate_sql_query(question)
    return run_query(sql_query)

def get_ASN_data(question):
    base_url = "http://193.203.162.39:9090/nxt-wms/trnHeader?"
    complete_url = f"{base_url}branchMaster.id=343&transactionUid={question}&userId=164&transactionType=ASN"
    try:
        response = requests.get(complete_url)
        response.raise_for_status()
        data = response.json()

        if 'result' in data and 'content' in data['result'] and data['result']['content']:
            content = data['result']['content'][0]
            trnHeaderAsn = content['trnHeaderAsn']
            party = content['party'][0]

            transactionUid = trnHeaderAsn['transactionUid']
            customerOrderNo = trnHeaderAsn.get('customerOrderNo', 'N/A')
            orderDate = trnHeaderAsn.get('orderDate', 'N/A')
            customerInvoiceNo = trnHeaderAsn.get('customerInvoiceNo', 'N/A')
            invoiceDate = trnHeaderAsn.get('invoiceDate', 'N/A')
            expectedReceivingDate = trnHeaderAsn['expectedReceivingDate']
            transactionStatus = trnHeaderAsn['transactionStatus']
            shipper_code = party['shipper']['code'] if party['shipper'] else 'N/A'
            shipper_name = party['shipper']['name'] if party['shipper'] else 'N/A'

            data = [
                ["Transaction UID", transactionUid],
                ["Customer Order No", customerOrderNo],
                ["Order Date", orderDate],
                ["Customer Invoice No", customerInvoiceNo],
                ["Invoice Date", invoiceDate],
                ["Expected Receiving Date", expectedReceivingDate],
                ["Transaction Status", transactionStatus],
                ["Shipper Code", shipper_code],
                ["Shipper Name", shipper_name]
            ]
            return f"The ASN details of {question} are {data}."
        else:
            return "ASN details were not found. Please contact the system administrator."

    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except Exception as err:
        print(f"An error occurred: {err}")


get_ASN_data("ASN24072400001")

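# Note: the call above is a smoke test that hits the live trnHeader endpoint at import time;
# on failure the function only prints the error and implicitly returns None.
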
def load_and_split_pdf(pdf_path):
    reader = PdfReader(pdf_path)
    text = ''
    for page in reader.pages:
        text += page.extract_text()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    texts = text_splitter.split_text(text)
    return texts


def create_vector_store(texts):
    embeddings = OpenAIEmbeddings()
    vector_store = FAISS.from_texts(texts, embeddings)
    return vector_store


def query_vector_store(vector_store, query):
    docs = vector_store.similarity_search(query, k=5)
    return docs


def summarize_document(docs):
    summarized_docs = []
    for doc in docs:
        if isinstance(doc, list):
            doc_content = ' '.join([d.page_content for d in doc])
        else:
            doc_content = doc.page_content

        sentences = sent_tokenize(doc_content)
        if len(sentences) > 5:
            summarized_content = ' '.join(sentences[:5])
        else:
            summarized_content = doc_content
        summarized_docs.append(summarized_content)
    return '\n\n'.join(summarized_docs)


pdf_path = "Inbound.pdf"
#pdf_path = r"D:\rajesh\python\chat_agent\Inbound.pdf"
texts = load_and_split_pdf(pdf_path)
vector_store = create_vector_store(texts)

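# Illustrative use only: query_vector_store(vector_store, "How is an inbound ASN received?") returns
# the 5 most similar chunks of Inbound.pdf (k=5 above). summarize_document() can trim each chunk to
# its first five sentences, although document_data_tool below returns the raw chunks unchanged.
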
def document_data_tool(question):
    query_response = query_vector_store(vector_store, question)
    #summarized_response = summarize_document(query_response)
    return query_response


def make_api_request(url, params):
    """Generic helper to make an API GET request and return the parsed JSON data."""
    try:
        response = requests.get(url, params=params)
        response.raise_for_status()  # Raises an HTTPError if the response was an error
        return response.json()
    except requests.exceptions.HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
    except Exception as err:
        print(f"An error occurred: {err}")


name = ""
warehouse_id = ""
apis = [
    # Fetch warehouse id
    {
        "url": "http://193.203.162.39:9090/nxt-wms/userWarehouse/fetchWarehouseForUserId?",
        "params": {"query": name, "userId": "164"}
    },
    # Fetch customer id
    {
        "url": "http://193.203.162.39:9090/nxt-wms/userCustomer/fetchCustomerForUserId?",
        "params": {"query": "TESTING 123", "userId": "164", "status": "Active"}
    },
    # Stock summary based on warehouse id
    {
        "url": "http://193.203.162.39:9090/nxt-wms/transactionHistory/stockSummary?",
        "params": {"branchId": "343", "onDate": "2024-08-06", "warehouseId": warehouse_id}
    }
]

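# apis[0] resolves a warehouse id from its name, apis[1] looks up a customer id (not used below),
# and apis[2] fetches the stock summary; its "warehouseId" placeholder is filled in by
# inventory_report() once the id has been resolved.
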
def inventory_report(question):
    # Input format: "<warehouse name>:<user question>"
    name = question.split(":")[0]
    question = question.split(":")[1]

    data = make_api_request(apis[0]["url"], apis[0]["params"])
    if data:
        # Extract the id of the warehouse whose name matches
        warehouse_id = next((item['id'] for item in data['result'] if item['name'] == name), None)

        # Update the stock-summary request with the resolved warehouse id
        for api in apis:
            if "warehouseId" in api["params"]:
                api["params"]["warehouseId"] = warehouse_id

        data1 = make_api_request(apis[2]["url"], apis[2]["params"])

        from tabulate import tabulate

        headers = ["S.No", "Warehouse Code", "Warehouse Name", "Customer Code", "Customer Name",
                   "Item Code", "Item Name", "Currency", "EAN", "UOM", "Quantity", "Gross Weight",
                   "Volume", "Total Value"]
        table_data = []

        for index, item in enumerate(data1['result'], start=1):
            row = [
                index,  # Serial number
                item['warehouse']['code'],
                item['warehouse']['name'],
                item['customer']['code'],
                item['customer']['name'],
                item['skuMaster']['code'],
                item['skuMaster']['name'],
                item['currency']['code'],
                item['eanUpc'],
                item['uom']['code'],
                item['totalQty'],
                item['grossWeight'],
                item['volume'],
                item['totalValue']
            ]
            table_data.append(row)

        #if table_data:
        #    print(tabulate(table_data, headers=headers, tablefmt="grid"))

        # Convert to a pandas DataFrame and let pandasai answer the charting question
        import pandas as pd
        from pandasai.llm.openai import OpenAI
        from pandasai import SmartDataframe

        df = pd.DataFrame(table_data, columns=headers)
        llm = OpenAI()
        sdf = SmartDataframe(df, config={"llm": llm})
        #chart = sdf.chat("Can you draw a bar chart with all available item names and quantities.")
        chart = sdf.chat(question)
        return chart

#inventory_report("WH:can you give me a bar chart with item name and quantity for the warehouse WH")

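# inventory_report() expects its input in the form "<warehouse name>:<user question>", e.g.
# "WH:can you give me a bar chart with item name and quantity for the warehouse WH" (the commented
# call above), matching the ':' delimiter convention described in QueryInput below.
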
# Define input and output models using Pydantic
class QueryInput(BaseModel):
    question: str = Field(description="The question to be answered by the appropriate tool. Please follow the instructions. For the API tool, do not send the question as it is; send only the ASN id. Invoke the dataVisualization tool by processing the user question and sending two inputs to the tool: the warehouse name and the entire user question itself. Join those two strings into a single input string with ':' as the delimiter.")


# Define the output model for the tools
class QueryOutput(BaseModel):
    result: str = Field(..., description="Display the answer based on the prompts given in each tool. The dataVisualization tool returns an image file; pass only the image file path to gr.Image. For the DocumentData tool, provide a complete and concise response within 200 words and ensure that the response is not truncated and covers the essential points.")


# Wrap each function with StructuredTool for better parameter handling
tools = [
    StructuredTool(
        func=get_ASN_data,
        name="APIData",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to get details from the ASN API. The ASN id in the input starts with the three letters 'ASN' followed by an 11-digit number. Pass only the id as input; do not send the complete user question to the tool. For any other ASN-related queries without an ASN id, use the document tool."
    ),
    StructuredTool(
        func=document_data_tool,
        name="DocumentData",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Provide a complete and concise response within 200 words and ensure that the response is not truncated and covers the essential points."
    ),
    StructuredTool(
        func=database_tool,
        name="DatabaseQuery",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to query the database based on structured input."
    ),
    StructuredTool(
        func=inventory_report,
        name="dataVisualization",
        args_schema=QueryInput,
        output_schema=QueryOutput,
        description="Tool to generate visual output for a particular warehouse. Invoke this tool if the user wants to create charts. Process the user question and send two inputs to the tool: the warehouse name and the entire user question itself."
    )
]

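# Tool routing, per the descriptions above: ASN ids go to APIData, free-text WMS process questions go
# to DocumentData, schema-level questions go to DatabaseQuery, and chart requests go to dataVisualization.
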
prompt_template = f"""You are an assistant that helps with database queries, API information, and document retrieval. Your job is to provide clear, complete, and detailed responses to the following queries. Give the output in a user-friendly way and remove "**" from the response. For example, document-related queries should be answered clearly and concisely with numbering rather than as a paragraph. Database-related queries should be answered with proper indentation and numbered rows. ASN-id-related queries should also be answered with proper indentation and numbered rows.
For ASN-id-related questions, if the user specifies an ASN id, provide the information from the API tool. Pass only the id as input to the tool; do not pass the entire question. If the details are not found, say so clearly and concisely.
You are an AI assistant trained to help with warehouse management questions based on a detailed document about our WMS. The document covers processes such as ASN handling, purchase orders, cross docking, appointment scheduling for shipments, and yard management. Provide a complete and concise response within 200 words, ensure that the response is not truncated and covers the essential points, and focus on actionable insights and clear explanations related to the specific query. Remove "**" from the response.
For SQL database-related questions, only use the fields available in the warehouse schema, including tables such as customer_master, efs_company_master, efs_group_company_master, efs_region_master, party_address_detail, and wms_warehouse_master.
For data visualization, the user will ask for the inventory report of a particular warehouse. Your job is to return the image path to the chat interface and display the image as output.
{{agent_scratchpad}}
Here is the information you need to process:
Question: {{input}}"""

llm = llm.bind()
agent = create_tool_calling_agent(llm, tools, ChatPromptTemplate.from_template(prompt_template))
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

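# Illustrative use only: agent_executor.invoke({"input": "What is ASN24072400001?"}) returns a dict
# whose "output" key holds the final answer text; answer_question() below relies on that shape.
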
# Chat handler for the Gradio interface
max_iterations = 5
iterations = 0


def answer_question(user_question, chatbot):
    global iterations
    iterations = 0

    while iterations < max_iterations:
        response = agent_executor.invoke({"input": user_question})
        if isinstance(response, dict):
            response_text = response.get("output", "")
        else:
            response_text = response
        if "invalid" not in response_text.lower():
            break
        iterations += 1

    if iterations == max_iterations:
        return "The agent could not generate a valid response within the iteration limit."

    if "chart" in user_question:
        # Open the generated chart and embed it in the chat as a base64-encoded inline image
        img = Image.open('/home/user/app/exports/charts/temp_chart.png')
        buffered = BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        img = f'<img src="data:image/png;base64,{img_str}">'
        chatbot.append((user_question, img))
        return gr.update(value=chatbot)
    else:
        chatbot.append((user_question, response_text))
        return gr.update(value=chatbot)

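# Assumption: /home/user/app/exports/charts/temp_chart.png is where pandasai writes the chart it
# generates inside this Space, so a question containing the word "chart" is answered by embedding
# that file as an inline image rather than by the agent's text output.
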
css = """
.gr-chatbot {
    /* Custom styles for the Chatbot component */
    border: 1px solid #ccc;
    border-radius: 10px;
    padding: 10px;
    background-color: #f9f9f9;
    height: 300px;      /* Adjust the height as needed */
    overflow-y: auto;   /* Add scroll if the content exceeds the height */
}

.gr-button {
    height: 40px;       /* Adjust the height as needed */
}
"""


def submit_feedback(feedback, chatbot):
    feedback_response = "User feedback: " + feedback
    return chatbot + [(feedback_response, None)], gr.update(visible=False), gr.update(visible=False)


def handle_dislike(data: gr.LikeData):
    if not data.liked:
        print("downvote")
        return gr.update(visible=True), gr.update(visible=True)
    else:
        print("upvote")
        return gr.update(visible=False), gr.update(visible=False)

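# Feedback flow: a thumbs-down on a chatbot message reveals the feedback textbox and button
# (handle_dislike); submitting appends the feedback to the chat and hides both again (submit_feedback).
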
with gr.Blocks(css=css) as demo:
    gr.Markdown("<CENTER><h2 style='font-size: 20px; font-family: Calibri;'>NewageNXT GPT</h2></CENTER>")
    chatbot = gr.Chatbot(elem_classes="gr-chatbot",
                         label="Ask a question about the API, Database, a Document or Warehouse inventory analysis.")

    with gr.Row():
        with gr.Column(scale=1):
            message = gr.Textbox(show_label=False)
        with gr.Column(scale=1):
            with gr.Row():
                button = gr.Button("Submit", elem_classes="gr-button")
                gr.ClearButton(message, elem_classes="gr-button")
    with gr.Row():
        with gr.Column(scale=1):
            feedback_textbox = gr.Textbox(visible=False, show_label=False)
        with gr.Column(scale=1):
            submit_feedback_button = gr.Button("Submit Feedback", visible=False, elem_classes="gr-button")

    button.click(answer_question, [message, chatbot], [chatbot])
    message.submit(answer_question, [message, chatbot], [chatbot])
    # Clear the input box after submission; no inputs are passed, so the lambdas take no arguments
    message.submit(lambda: gr.update(value=""), None, [message], queue=False)
    button.click(lambda: gr.update(value=""), [], [message])

    chatbot.like(handle_dislike, None, outputs=[feedback_textbox, submit_feedback_button])
    submit_feedback_button.click(submit_feedback, [feedback_textbox, chatbot],
                                 [chatbot, feedback_textbox, submit_feedback_button])
    submit_feedback_button.click(lambda: gr.update(value=""), [], [feedback_textbox])

demo.launch()
requirements.txt
ADDED
@@ -0,0 +1,15 @@
huggingface_hub==0.22.2
langchain
mysql-connector-python
langchain-community
langchain-openai
requests
gradio
PyPDF2
faiss-cpu
psycopg2
nltk
tabulate
pandas
numpy
pandasai