racaes committed on
Commit
53d4844
1 Parent(s): 4a667e2

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +105 -0
app.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# ---------------------------------------------------------------------------
# Dependency setup.
# NOTE(review): the original file contained IPython notebook magics
# ("%%capture" and "!pip install ..."), which are a SyntaxError in a plain
# .py module.  They are preserved below as a comment so the script parses;
# install the packages through requirements.txt instead:
#
#   gradio
#   langchain
#   openai
#   torch
#   transformers
#   rank-bm25
#   accelerate
#   sentence_transformers
#   qdrant_client
#   langchain_openai
# ---------------------------------------------------------------------------
12
+
13
+
14
+
15
+ import os
16
+ import gradio as gr
17
+ from langchain.vectorstores import qdrant
18
+ from langchain.embeddings import HuggingFaceBgeEmbeddings
19
+ from langchain_community.vectorstores import Qdrant
20
+ from qdrant_client import QdrantClient
21
+ import qdrant_client
22
+ from langchain.chains import ConversationalRetrievalChain
23
+ from langchain_core.prompts import ChatPromptTemplate
24
+ from langchain.chains import LLMChain
25
+ from langchain.llms import OpenAI
26
+ from langchain_openai.chat_models import ChatOpenAI
27
+ from langchain.prompts import PromptTemplate, ChatPromptTemplate
28
+ from langchain.chains import SimpleSequentialChain
29
+ from langchain.chains import ConversationalRetrievalChain
30
+ from langchain.prompts import ChatPromptTemplate
31
+ from langchain.memory import ConversationBufferMemory
32
+ from langchain.chains.router import MultiPromptChain
33
+ from langchain.chains import ConversationChain
34
+ from langchain.chains.llm import LLMChain
35
+ from langchain.prompts import PromptTemplate
36
+ from langchain.chains import RetrievalQA
37
+ from langchain.chains.router.embedding_router import EmbeddingRouterChain
38
+
39
import os

# --- Credentials -----------------------------------------------------------
# SECURITY(review): the original commit hard-coded live OpenAI and Qdrant API
# keys directly in source control.  Keys published this way are compromised
# and must be rotated immediately.  All secrets are now read from the
# environment (e.g. the hosting platform's secret store); setdefault keeps
# an externally provided OPENAI_API_KEY intact.
os.environ.setdefault("OPENAI_API_KEY", "")

# load the embedding model
model_name = "BAAI/bge-large-en"
# model_kwargs = {"device": "cpu"}
# Raw (un-normalized) BGE vectors, matching how the collection was indexed.
encode_kwargs = {"normalize_embeddings": False}

embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    # model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)

# Qdrant connection settings — also taken from the environment rather than
# being committed to the repository.
qdrant_key = os.environ.get("QDRANT_API_KEY", "")
qdrant_url = os.environ.get("QDRANT_URL", "")
57
+
58
def get_vector_store():
    """Connect to the remote Qdrant cluster and wrap its "hybridshop"
    collection as a LangChain vector store using the module-level
    `embeddings` model.
    """
    connection = qdrant_client.QdrantClient(qdrant_url, api_key=qdrant_key)
    return Qdrant(
        client=connection,
        collection_name="hybridshop",
        embeddings=embeddings,
    )
71
+
72
# Wire the retrieval-augmented chat pipeline together at import time.
vector_store = get_vector_store()

# Low temperature keeps product answers mostly deterministic.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.2)

# Chat history is accumulated in memory under the 'chat_history' key.
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)

# "stuff" chain type: retrieved documents are concatenated into the prompt.
conversation_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(),
    memory=memory,
)
84
+
85
def predict(question):
    """Answer *question* against the product vector store via the shared
    conversational chain (which also records the exchange in memory)."""
    response = conversation_chain({"question": question})
    return response["answer"]
89
+
90
# Example prompts displayed beneath the Gradio input box.
example_queries = [
    ["suggest me some smart tvs"],
    ["smartphones under 8gb"],
    ["smartphones with 8gb ram"],
    ["When will I go to the moon?"],
    ["latest phones under 20,000"],
    ["latest smartphones under 20,000"],
    ["Which monitors are compatible with Mac laptops?"],
]

# Single text-in / text-out UI backed by `predict`.
interface = gr.Interface(
    fn=predict,
    inputs=["text"],
    outputs=["text"],
    examples=example_queries,
)

interface.launch()