jay-maharlika committed
Commit • 925b6d8
Parent(s): 47d83dd
Update app.py

app.py CHANGED
@@ -1,39 +1,39 @@
-import os
 import gradio as gr
-import
+import tiktoken
 
-
+import os
 from langchain.text_splitter import CharacterTextSplitter
 from langchain.text_splitter import RecursiveCharacterTextSplitter
-
-from
-from
-from langchain_community.chat_models import ChatOpenAI
-# from langchain import PromptTemplate
-from langchain_core.prompts import PromptTemplate
+from langchain.vectorstores import Chroma
+from langchain.document_loaders import TextLoader
+from langchain import PromptTemplate
 from langchain.chains import LLMChain
-from
-
-from
-
-
+from langchain.chains.qa_with_sources import load_qa_with_sources_chain
+from langchain.llms import OpenAI
+from langchain.vectorstores import FAISS
+from langchain_openai import OpenAIEmbeddings
+from langchain_openai import ChatOpenAI
 
+# Load the FAISS index from the .pkl file
 openai_api_key = os.getenv("OPENAI_API_KEY")
-
-
-
-
+if not openai_api_key:
+    raise ValueError("OPENAI_API_KEY environment variable is not set.")
+
+embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
+#with open("index.pkl", "rb") as f:
+#    db = faiss.read_index(f.read())
+#with open("index.pkl", "rb") as f:
+#db = faiss.deserialize_index(f.read())
 
-
-#------------------------------------------------------------------------------
-db=FAISS.load_local("faiss_index", embeddings,allow_dangerous_deserialization=True)
-#-----------------------------------------------------------------------------
+db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
+
 def get_response_from_query(db, query, k=3):
 
     docs = db.similarity_search(query, k=k)
 
     docs_page_content = " ".join([d.page_content for d in docs])
 
+    # llm = BardLLM()
     llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k",temperature=0)
 
     prompt = PromptTemplate(
@@ -48,9 +48,6 @@ def get_response_from_query(db, query, k=3):
     )
 
     chain = LLMChain(llm=llm, prompt=prompt)
-    # chain = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, prompt=prompt,
-    # chain_type="stuff", retriever=db.as_retriever(), return_source_documents=True)
-
     response = chain.run(question=query, docs=docs_page_content,return_source_documents=True)
     r_text = str(response)
 
@@ -73,14 +70,24 @@
 
     return response,docs,evals
 
+
+
 def greet(query):
 
     answer,sources,evals = get_response_from_query(db,query,2)
     return answer,sources,evals
-
+examples = [
+    ["How to be happy"],
+    ["Climate Change Challenges in Europe"],
+    ["Philosophy in the world of Minimalism"],
+    ["Hate Speech vs Freedom of Speech"],
+    ["Articles by Noam Chomsky on US Politics"],
+    ["The importance of values and reflection"]
+]
 demo = gr.Interface(fn=greet, title="cicero-semantic-search", inputs="text",
                     outputs=[gr.components.Textbox(lines=3, label="Response"),
                              gr.components.Textbox(lines=3, label="Source"),
-                             gr.components.Textbox(lines=3, label="Evaluation")]
+                             gr.components.Textbox(lines=3, label="Evaluation")],
+                    examples=examples)
 
-demo.launch(share=True
+demo.launch(share=True)
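The rewritten startup path above drops the hand-rolled index.pkl reads in favor of FAISS.load_local. For context, here is a minimal sketch of how a compatible "faiss_index" folder could be built offline with the same embedding model; the corpus filename and chunking parameters below are hypothetical, not taken from this repo.

import os

from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

# Hypothetical corpus file; the actual source documents are not part of this diff.
docs = TextLoader("corpus.txt").load()

# Assumed chunking parameters; app.py only imports the splitters.
chunks = RecursiveCharacterTextSplitter(
    chunk_size=1000, chunk_overlap=100
).split_documents(docs)

embeddings = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY"))
db = FAISS.from_documents(chunks, embeddings)

# save_local writes index.faiss plus a pickled docstore (index.pkl), which is
# why load_local in app.py needs allow_dangerous_deserialization=True.
db.save_local("faiss_index")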
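The PromptTemplate body (new lines 39-48) is collapsed out of this diff, but the call chain.run(question=query, docs=docs_page_content) pins down its two input variables. Below is a hypothetical sketch of its shape only; the actual wording lives in app.py. Note that LLMChain has no retriever, so the return_source_documents=True keyword passed to chain.run is most likely ignored as an unused input; the sources shown in the UI come from the separate docs return value.

from langchain import PromptTemplate

# Hypothetical template text; only the variable names are implied by app.py.
prompt = PromptTemplate(
    input_variables=["question", "docs"],
    template=(
        "Answer the question using only the context below.\n\n"
        "Context: {docs}\n\n"
        "Question: {question}\n"
    ),
)

print(prompt.format(question="What is virtue?", docs="..."))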
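The commit also adds import tiktoken without using it in the lines shown. In a stuff-the-context pipeline like this one, tiktoken's usual job is a pre-flight check that the k retrieved chunks fit the gpt-3.5-turbo-16k window; a sketch of that idea under assumed budgets, not code from app.py.

import tiktoken

# gpt-3.5-turbo-16k resolves to the cl100k_base encoding.
enc = tiktoken.encoding_for_model("gpt-3.5-turbo-16k")

def fits_context(docs_page_content: str, budget: int = 15000) -> bool:
    """Return True if the stuffed context stays within an assumed token
    budget, leaving headroom below the 16k window for prompt and answer."""
    return len(enc.encode(docs_page_content)) <= budget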