jay-maharlika committed on
Commit
d1dab5d
1 Parent(s): 925b6d8

Delete app.py

Files changed (1)
  1. app.py +0 -93
app.py DELETED
@@ -1,93 +0,0 @@
- import gradio as gr
- import tiktoken
-
- import os
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain.vectorstores import Chroma
- from langchain.document_loaders import TextLoader
- from langchain import PromptTemplate
- from langchain.chains import LLMChain
- from langchain.chains.qa_with_sources import load_qa_with_sources_chain
- from langchain.llms import OpenAI
- from langchain.vectorstores import FAISS
- from langchain_openai import OpenAIEmbeddings
- from langchain_openai import ChatOpenAI
-
- # Load the OpenAI API key and the saved FAISS index
- openai_api_key = os.getenv("OPENAI_API_KEY")
- if not openai_api_key:
-     raise ValueError("OPENAI_API_KEY environment variable is not set.")
-
- embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
- #with open("index.pkl", "rb") as f:
- #    db = faiss.read_index(f.read())
- #with open("index.pkl", "rb") as f:
- #    db = faiss.deserialize_index(f.read())
-
- db = FAISS.load_local("faiss_index", embeddings, allow_dangerous_deserialization=True)
-
- def get_response_from_query(db, query, k=3):
-
-     docs = db.similarity_search(query, k=k)
-
-     docs_page_content = " ".join([d.page_content for d in docs])
-
-     # llm = BardLLM()
-     llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0)
-
-     prompt = PromptTemplate(
-         input_variables=["question", "docs"],
-         template="""
-         A bot that is open to discussions about different cultural, philosophical and political exchanges. I will do different analyses of the articles provided to me. Stay truthful, and if you weren't provided any resources, give your opinion only.
-         Answer the following question: {question}
-         By searching the following articles: {docs}
-         Only use the factual information from the documents. Make sure to mention key phrases from the articles.
-         If you feel like you don't have enough information to answer the question, say "I don't know".
-         """,
-     )
-
-     chain = LLMChain(llm=llm, prompt=prompt)
-     response = chain.run(question=query, docs=docs_page_content)
-     r_text = str(response)
-
-     # Evaluation: check whether the response is faithful to the retrieved documents
-
-     prompt_eval = PromptTemplate(
-         input_variables=["answer", "docs"],
-         template="""
-         Your job is to evaluate whether the response to a given context is faithful.
-         For the following: {answer}
-         By searching the following article: {docs}
-         Give a reason why they are similar or not, starting with a Yes or a No.
-         """,
-     )
-
-     chain_part_2 = LLMChain(llm=llm, prompt=prompt_eval)
-
-
-     evals = chain_part_2.run(answer=r_text, docs=docs_page_content)
-
-     return response, docs, evals
-
-
-
- def greet(query):
-
-     answer, sources, evals = get_response_from_query(db, query, 2)
-     return answer, sources, evals
- examples = [
-     ["How to be happy"],
-     ["Climate Change Challenges in Europe"],
-     ["Philosophy in the world of Minimalism"],
-     ["Hate Speech vs Freedom of Speech"],
-     ["Articles by Noam Chomsky on US Politics"],
-     ["The importance of values and reflection"]
- ]
- demo = gr.Interface(fn=greet, title="cicero-semantic-search", inputs="text",
-                     outputs=[gr.components.Textbox(lines=3, label="Response"),
-                              gr.components.Textbox(lines=3, label="Source"),
-                              gr.components.Textbox(lines=3, label="Evaluation")],
-                     examples=examples)
-
- demo.launch(share=True)