Gaurav-2273 committed on
Commit
505e588
1 Parent(s): 5fb85b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -2
app.py CHANGED
@@ -1,3 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import json
3
  from typing import List, Dict
@@ -82,12 +179,13 @@ def answer_query(question: str):
82
  return chat_history
83
 
84
  with gr.Blocks() as demo:
85
- initialize_chatbot_from_json("embeddings.json", openai_api_key)
86
  chat_history = []
 
87
 
88
  chatbot = gr.Chatbot(label="Chatbot")
89
  question = gr.Textbox(label="Ask a question", placeholder="Type your question...")
90
  question.submit(answer_query, inputs=[question], outputs=[chatbot])
91
 
92
  if __name__ == "__main__":
93
- demo.launch()
 
1
+ # import gradio as gr
2
+ # import json
3
+ # from typing import List, Dict
4
+ # from langchain_openai.embeddings import OpenAIEmbeddings
5
+ # from langchain_chroma import Chroma
6
+ # from langchain.retrievers.multi_query import MultiQueryRetriever
7
+ # from langchain.chains import ConversationalRetrievalChain
8
+ # from langchain.memory import ConversationBufferMemory
9
+ # from langchain_openai import ChatOpenAI
10
+ # from langchain.schema import Document
11
+ # from langchain.chains import LLMChain
12
+ # from langchain.chains.question_answering import load_qa_chain
13
+ # from langchain.prompts import PromptTemplate
14
+ # import os
15
+
16
+ # openai_api_key = os.getenv("OPENAI_API_KEY")
17
+
18
+ # vectorstore = None
19
+ # llm = None
20
+ # qa_instance = None
21
+ # chat_history = []
22
+
23
+ # def load_embeddings_from_json(json_file_path: str):
24
+ # with open(json_file_path, 'r') as f:
25
+ # data = json.load(f)
26
+ # chunks = [item['chunk'] for item in data]
27
+ # embeddings = [item['embeddings'] for item in data]
28
+ # ids = [item.get('id', str(index)) for index, item in enumerate(data)]
29
+ # return chunks, embeddings, ids
30
+
31
+ # def initialize_chatbot_from_json(json_file_path: str, openai_api_key: str):
32
+ # global vectorstore, llm, qa_instance
33
+ # if vectorstore is None:
34
+ # chunks, embeddings, ids = load_embeddings_from_json(json_file_path)
35
+ # vectorstore = Chroma(
36
+ # collection_name="my_collection",
37
+ # persist_directory=None,
38
+ # embedding_function=OpenAIEmbeddings(api_key=openai_api_key)
39
+ # )
40
+ # vectorstore._client._add(
41
+ # collection_id=vectorstore._collection.id,
42
+ # ids=ids,
43
+ # embeddings=embeddings,
44
+ # metadatas=[{"source": "json"} for _ in chunks],
45
+ # documents=chunks,
46
+ # )
47
+ # if llm is None:
48
+ # llm = ChatOpenAI(api_key=openai_api_key, temperature=0.5, model="gpt-4o", verbose=True)
49
+ # retriever = MultiQueryRetriever.from_llm(retriever=vectorstore.as_retriever(), llm=llm)
50
+ # memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
51
+ # _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a
52
+ # standalone question without changing the content in given question.
53
+ # Chat History:
54
+ # {chat_history}
55
+ # Follow Up Input: {question}
56
+ # Standalone question:"""
57
+ # condense_question_prompt_template = PromptTemplate.from_template(_template)
58
+ # prompt_template = """You are a highly informative and helpful QA System specialized in providing information related to the UPSC Exam but strictly within the 'Context'. Ensure you only answer questions that are relevant to the UPSC Exam. If the question asked is not in 'Context' and not related to the UPSC Exam, do not provide an answer. Always answer in an informative and highly detailed manner, oriented towards the UPSC Exam. Also never just answer the Query, Never tell anything about 'Context'. Dont use unnecessary lines!
59
+ # Context:
60
+ # {context}
61
+ # Question: {question}
62
+ # Helpful Answer:"""
63
+ # qa_prompt = PromptTemplate(
64
+ # template=prompt_template, input_variables=["context", "question"]
65
+ # )
66
+ # question_generator = LLMChain(llm=llm, prompt=condense_question_prompt_template, memory=memory)
67
+ # doc_chain = load_qa_chain(llm, chain_type="stuff", prompt=qa_prompt)
68
+ # qa_instance = ConversationalRetrievalChain(
69
+ # retriever=retriever,
70
+ # question_generator=question_generator,
71
+ # combine_docs_chain=doc_chain,
72
+ # memory=memory)
73
+
74
+ # def answer_query(question: str):
75
+ # global chat_history
76
+ # if qa_instance is None:
77
+ # return [("Please initialize the system first.", "")]
78
+ # if not question.strip():
79
+ # return [("Please enter a question.", "")]
80
+ # result = qa_instance({"question": question})
81
+ # chat_history.append((question, result['answer']))
82
+ # return chat_history
83
+
84
+ # with gr.Blocks() as demo:
85
+ # initialize_chatbot_from_json("embeddings.json", openai_api_key)
86
+ # chat_history = []
87
+
88
+ # chatbot = gr.Chatbot(label="Chatbot")
89
+ # question = gr.Textbox(label="Ask a question", placeholder="Type your question...")
90
+ # question.submit(answer_query, inputs=[question], outputs=[chatbot])
91
+
92
+ # if __name__ == "__main__":
93
+ # demo.launch()
94
+
95
+
96
+
97
+
98
  import gradio as gr
99
  import json
100
  from typing import List, Dict
 
179
  return chat_history
180
 
181
  with gr.Blocks() as demo:
182
+ # Reset chat history and memory on initialization
183
  chat_history = []
184
+ initialize_chatbot_from_json("embeddings.json", openai_api_key)
185
 
186
  chatbot = gr.Chatbot(label="Chatbot")
187
  question = gr.Textbox(label="Ask a question", placeholder="Type your question...")
188
  question.submit(answer_query, inputs=[question], outputs=[chatbot])
189
 
190
  if __name__ == "__main__":
191
+ demo.launch()