akileshjayakumar commited on
Commit
a6eeca1
1 Parent(s): a188b31

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -0
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # from langchain.embeddings import OpenAIEmbeddings
2
+ import gradio as gr
3
+ from langchain_community.chat_models import ChatOpenAI
4
+ from langchain.chains import RetrievalQA
5
+ from langchain_openai import OpenAIEmbeddings
6
+ from langchain.vectorstores import Qdrant
7
+ from langchain.document_loaders import TextLoader
8
+ from langchain.text_splitter import CharacterTextSplitter
9
+ from qdrant_client.models import VectorParams, Distance
10
+ import os
11
+ from dotenv import load_dotenv
12
+
13
# Pull OPENAI_API_KEY, QDRANT_URL and QDRANT_API_KEY from a local .env
# file before any of the clients below are constructed.
load_dotenv()

# Load the source document for the RAG index.
# NOTE(review): assumes about_me.txt sits in the working directory — confirm.
loader = TextLoader("about_me.txt")
documents = loader.load()

# Split the documents into manageable chunks (1000 chars, no overlap).
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)

# Embed the documents using OpenAI's embeddings (reads OPENAI_API_KEY).
embeddings = OpenAIEmbeddings()

# Create (or populate) the remote Qdrant collection from the chunks.
# This makes network calls to both OpenAI (embedding) and Qdrant (upsert).
vector_store = Qdrant.from_documents(
    documents=docs,
    embedding=embeddings,
    url=os.getenv("QDRANT_URL"),
    api_key=os.getenv("QDRANT_API_KEY"),
    collection_name="akilesh_docs"
)

# Set up the OpenAI chat model used to answer questions.
llm = ChatOpenAI(model_name="gpt-4")

# Expose the vector store as a retriever for the QA chain.
retriever = vector_store.as_retriever()

# Build the RAG chain: retrieve relevant chunks, "stuff" them all into
# the prompt, and let the LLM answer.
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever
)

# Debug: print the assembled chain at startup.
print(qa_chain)
49
+
50
+
51
def respond(message: str) -> str:
    """Answer a single user question with the RAG chain.

    Args:
        message: The user's question text.

    Returns:
        The LLM-generated answer string.
    """
    # Fix: calling the chain directly (qa_chain({...})) uses the deprecated
    # Chain.__call__ path; langchain >= 0.1 (implied by this file's
    # langchain_openai / langchain_community imports) expects .invoke().
    result = qa_chain.invoke({"query": message})
    return result["result"]
56
+
57
+
58
def generate_answer(message: str, history: list) -> str:
    """Gradio ChatInterface callback: produce an answer for *message*.

    The chat *history* Gradio passes in is accepted but ignored — every
    question is answered statelessly by the RAG chain.
    """
    return respond(message)
62
+
63
+
64
# Create the ChatInterface wired to the stateless RAG callback.
# NOTE(review): the retry_btn / undo_btn / clear_btn keyword arguments were
# removed in newer Gradio releases — confirm the pinned gradio version still
# accepts them, or drop these kwargs.
demo = gr.ChatInterface(
    fn=generate_answer, title="RAG App | Learn More About Me!", multimodal=False, retry_btn=None, undo_btn=None, clear_btn=None)

# Launch the Gradio interface (blocks and serves the web UI).
demo.launch()