# Ushopintel / app.py
# Dependencies (install before running, e.g. via requirements.txt — the
# original notebook magics (%%capture, !pip install) do not run in a plain app.py):
#   pip install -U gradio langchain langchain-community langchain-openai openai \
#       torch transformers rank-bm25 accelerate sentence-transformers qdrant-client
import os

import gradio as gr
import qdrant_client
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain_community.vectorstores import Qdrant
from langchain_openai.chat_models import ChatOpenAI
os.environ["OPENAI_API_KEY"] = "sk-proj-kcPWLq6XrtyJcmZP7IbAT3BlbkFJHf5I0O8ulxcd89Tm0wtn"
# Load the embedding model used to encode both the stored products and queries.
model_name = "BAAI/bge-large-en"
encode_kwargs = {"normalize_embeddings": False}
embeddings = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    # model_kwargs={"device": "cpu"},  # uncomment to force CPU inference
    encode_kwargs=encode_kwargs,
)
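
# Quick sanity check, illustrative only (bge-large-en embeds to 1024 dimensions):
# assert len(embeddings.embed_query("4k smart tv")) == 1024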
# Qdrant credentials are likewise read from the environment rather than being
# committed to the repo (QDRANT_URL / QDRANT_API_KEY are assumed secret names).
qdrant_url = os.environ["QDRANT_URL"]
qdrant_key = os.environ["QDRANT_API_KEY"]
def get_vector_store():
    """Connect to the hosted Qdrant instance and wrap the product collection."""
    client = qdrant_client.QdrantClient(
        url=qdrant_url,
        api_key=qdrant_key,
    )
    vector_store = Qdrant(
        client=client,
        collection_name="hybridshop",
        embeddings=embeddings,
    )
    return vector_store
vector_store = get_vector_store()
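
# Optional retrieval smoke test (assumes the "hybridshop" collection is already
# populated with product documents):
# for doc in vector_store.similarity_search("4k smart tv", k=3):
#     print(doc.page_content[:80])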
# A low temperature keeps answers close to the retrieved product data.
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.2)
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
    llm=llm,
    chain_type="stuff",
    retriever=vector_store.as_retriever(),
    memory=memory,
)
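
# The chain condenses each follow-up plus the buffered chat history into a
# standalone query before retrieving, so turns can reference earlier ones.
# Illustrative two-turn exchange (not executed at import time):
# conversation_chain.invoke({"question": "suggest me some smart tvs"})
# conversation_chain.invoke({"question": "which of those is the cheapest?"})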
def predict(question):
    """Answer one user turn with the retrieval-augmented conversation chain."""
    result = conversation_chain.invoke({"question": question})
    return result["answer"]
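
# Manual check before wiring up the UI (illustrative):
# print(predict("smartphones with 8gb ram"))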
interface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    examples=[
        ["suggest me some smart tvs"],
        ["smartphones under 8gb"],
        ["smartphones with 8gb ram"],
        ["When will I go to the moon?"],
        ["latest phones under 20,000"],
        ["latest smartphones under 20,000"],
        ["Which monitors are compatible with Mac laptops?"],
    ],
)
interface.launch()
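# launch() with defaults is sufficient on Hugging Face Spaces; for local testing
# you could pass e.g. interface.launch(server_name="0.0.0.0") to expose the app
# on the LAN (assumption: no auth or request queuing is needed).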