Update app.py
app.py CHANGED
@@ -7,11 +7,6 @@ Original file is located at
     https://colab.research.google.com/drive/1wUztAR4EdQUL3vkpM3Is-ps0TEocClry
 """
 
-# !pip install langchain openai qdrant-client gradio pandas tiktoken -U langchain-community
-
-# from google.colab import userdata
-# openai_api_key=userdata.get('openai_api_key')
-
 import gradio as gr
 import pandas as pd
 from langchain.document_loaders.csv_loader import CSVLoader
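Note: the deleted !pip install line was the only record of this app's dependencies, so the Space now has to declare them itself. A plausible requirements.txt covering the imports used below, inferred from the removed command (versions left unpinned here):

    langchain
    langchain-community
    langchain-openai
    openai
    qdrant-client
    gradio
    pandas
    tiktoken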
@@ -21,10 +16,6 @@ from langchain.vectorstores import Qdrant
 from langchain.chains import VectorDBQA
 from langchain.llms import OpenAI
 
-# qdrant_url=userdata.get('Qdrant')
-# qdrant_api_key=userdata.get('qdrant_api_key')
-# openai_api_key=userdata.get('openai_api_key')
-# # groq_api_key=userdata.get('GROQ_API_KEY')
 
 import os
 
@@ -32,7 +23,6 @@ openai_api_key = os.getenv('openai_api_key')
 qdrant_url = os.getenv('QDRANT_URL')
 qdrant_api_key = os.getenv('qdrant_api_key')
 
-# Now you can use these keys in your application
 
 
 #csv loader
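Note: credential loading has moved from Colab userdata to environment variables, so the Space needs openai_api_key, QDRANT_URL, and qdrant_api_key configured as secrets; a missing one only fails later, deep inside a request. A minimal fail-fast sketch (the RuntimeError guard is an illustrative addition, not part of the commit):

    import os

    # Read credentials from the environment (e.g. Space secrets).
    openai_api_key = os.getenv('openai_api_key')
    qdrant_url = os.getenv('QDRANT_URL')
    qdrant_api_key = os.getenv('qdrant_api_key')

    # Fail at startup rather than mid-request if a secret is missing.
    for name in ('openai_api_key', 'QDRANT_URL', 'qdrant_api_key'):
        if not os.getenv(name):
            raise RuntimeError('missing environment variable: ' + name)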
@@ -43,7 +33,6 @@ data=loader.load()
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
 texts = text_splitter.split_documents(data)
 
-len(texts)
 
 #embeding
 embeding=OpenAIEmbeddings(openai_api_key=openai_api_key, model="text-embedding-3-small")
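Note: with chunk_size=1000 and chunk_overlap=200, consecutive chunks share 200 characters, which helps when an answer straddles a chunk boundary. A sketch of the load-split-embed step using the same old-style langchain imports as this file (the doctors.csv path is illustrative):

    import os
    from langchain.document_loaders.csv_loader import CSVLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.embeddings import OpenAIEmbeddings

    loader = CSVLoader(file_path='doctors.csv')  # illustrative path
    data = loader.load()                         # one Document per CSV row

    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    texts = text_splitter.split_documents(data)
    print(len(texts))  # the bare len(texts) removed here did nothing outside a notebook

    embeding = OpenAIEmbeddings(openai_api_key=os.getenv('openai_api_key'),
                                model='text-embedding-3-small')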
@@ -82,15 +71,6 @@ from re import search
 #retriver
 retriver=qdrant.as_retriever( search_type="similarity", search_kwargs={"k":2})
 
-#search query
-query="show me a best darmatology doctor in peshawar "
-docs=retriver.get_relevant_documents(query)
-
-#write a code for prety print
-# for i in docs:
-#     print(i.page_content)
-
-# docs[0].metadata.items()
 
 from langchain import PromptTemplate
 
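Note: search_kwargs={"k": 2} caps every answer at the two most similar chunks; raising k trades tokens for recall. The deleted smoke test, reconstructed as a runnable sketch (query text illustrative; assumes qdrant is the vector store built earlier in the file):

    retriever = qdrant.as_retriever(search_type='similarity', search_kwargs={'k': 2})

    query = 'show me the best dermatology doctor in Peshawar'  # illustrative query
    docs = retriever.get_relevant_documents(query)

    # Pretty-print each retrieved chunk with its metadata.
    for doc in docs:
        print(doc.page_content)
        print(doc.metadata)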
@@ -134,13 +114,7 @@ prompt = PromptTemplate(
     input_variables=["context", "question"]
 )
 
-# #import conversation
-# from langchain.memory import ConversationBufferMemory
-# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-
-# !pip install langchain-openai
 
-#import ChatOpenAI
 from langchain.chat_models import ChatOpenAI
 llm = ChatOpenAI(model_name="gpt-4o", temperature=0, openai_api_key=openai_api_key)
 
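Note: the PromptTemplate body sits above this hunk, so only its input_variables are visible here. An illustrative stand-in showing how context and question slot in (wording assumed, not copied from the file):

    from langchain import PromptTemplate

    # Illustrative template; the actual wording lives outside this hunk.
    template = """Answer the question using only the context below.

    Context: {context}

    Question: {question}
    Answer:"""

    prompt = PromptTemplate(template=template, input_variables=['context', 'question'])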
@@ -168,64 +142,17 @@ rag_chain = (
     | StrOutputParser()
 )
 
-
-
-
-# import gradio as gr
-
-# # Gradio Interface
-# def search_doctor(input_text):
-#     return rag_chain.invoke(input_text)
-
-# # Create the Gradio interface
-# iface = gr.Interface(
-#     fn=search_doctor,
-#     inputs=gr.Textbox(lines=1, label="Ask a medical question"),
-#     outputs=gr.Textbox(label="Answer"),
-#     title="Medical Assistant",
-#     description="Find the best doctors based on your medical needs.",
-#     allow_flagging="never",
-#     theme="default",
-#     css=".gradio-container {border-radius: 10px; padding: 10px; background-color: #f9f9f9;} .gr-button {visibility: hidden;}"
-# )
-
-# # Launch the interface without the Gradio logo
-# iface.launch(show_api=False)
-
-# import gradio as gr
-
-# # Example RAG model invocation function (replace with your actual function)
-# def rag_model_query(query):
-#     # Replace with actual RAG model invocation
-#     return rag_chain.invoke(query)
-
-# # Define the Gradio function to handle both echo and RAG queries
-# def handle_message(message, history):
-#     # Check if the message contains a keyword to trigger RAG model
-#     if "doctor" in message["text"].lower():
-#         response = rag_model_query(message["text"])
-#     else:
-#         response = message["text"]
-#     return response
-
-# # Create the Gradio interface
-# demo = gr.ChatInterface(
-#     fn=handle_message,
-#     title="Medical Assistant",
-#     multimodal=True,
-# )
-
-# demo.launch()
+
+
+
 
 from langchain.chat_models import ChatOpenAI
 from langchain.schema import AIMessage, HumanMessage
 import openai
 import os
 import gradio as gr
-# os.environ["OPENAI_API_KEY"] = openai_api_key # Replace with your key
 
 llm = ChatOpenAI(temperature=1.0, model='gpt-4o', openai_api_key=openai_api_key)
-# llm = ChatOpenAI(model_name="gpt-4o", temperature=0, openai_api_key=openai_api_key, memory=memory)
 
 def reg(message, history):
     history_langchain_format = []
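Note: the body of rag_chain sits between hunks, so only its tail, | StrOutputParser(), is visible. A sketch of an LCEL pipeline consistent with the pieces this file defines (retriver, prompt, llm); the shape is assumed, not copied from the commit:

    from langchain.schema.output_parser import StrOutputParser
    from langchain.schema.runnable import RunnablePassthrough

    def format_docs(docs):
        # Join retrieved chunks into one context string for the prompt.
        return '\n\n'.join(doc.page_content for doc in docs)

    rag_chain = (
        {'context': retriver | format_docs, 'question': RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )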
@@ -235,13 +162,6 @@ def reg(message, history):
     history_langchain_format.append(HumanMessage(content=message))
     gpt_response = llm(history_langchain_format)
     return rag_chain.invoke(message)
-
-
-#     fn=reg,
-#     title="Medical Assistant",
-#     # theme="soft",
-# )
-
-# demo.launch(show_api=False)
-gr.ChatInterface(predict).launch()
+
+gr.ChatInterface(reg).launch()
 
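Note: swapping gr.ChatInterface(predict) for gr.ChatInterface(reg) fixes a NameError (predict is never defined), presumably the source of the Space's runtime error. As committed, though, reg still converts the chat history into LangChain messages and calls the bare llm, then discards that gpt_response and answers with rag_chain.invoke(message), so history never reaches the RAG chain and every turn pays for an unused LLM call. If the direct call really is dead code, a leaner equivalent:

    import gradio as gr

    def reg(message, history):
        # history is collected by gr.ChatInterface but unused by the chain,
        # so answer straight from the RAG pipeline.
        return rag_chain.invoke(message)

    gr.ChatInterface(reg).launch()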