# Pega_CDH_chatbot_using_RAG / CDH_chatbot_using_RAG.py
# Author: Bhanuprasadchouki
# Last change: Update CDH_chatbot_using_RAG.py (commit 50246f3, verified)
import os

import streamlit as st
from langchain_chroma import Chroma
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import GoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_huggingface import HuggingFaceEmbeddings
# --- Configuration -----------------------------------------------------------
# SECURITY: never commit real API keys to source control. Prefer supplying the
# key via the GROQ_API_KEY environment variable (or Streamlit's st.secrets).
# The literal below is kept only as a backward-compatible fallback and should
# be rotated and removed. (A previously committed Google API key has been
# deleted from this file for the same reason.)
GROQ_API_KEY = os.environ.get(
    "GROQ_API_KEY",
    "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",
)
CHROMA_DB_DIR = "./chroma_db_"  # Directory where the Chroma index is persisted
MODEL_NAME = "flax-sentence-embeddings/all_datasets_v4_MiniLM-L6"  # HF embedding model

# Embedding model used to vectorize both the stored documents and the queries.
embeddings_model = HuggingFaceEmbeddings(model_name=MODEL_NAME)

# Vector store backed by the persisted Chroma database.
db = Chroma(
    collection_name="vector_database",
    embedding_function=embeddings_model,
    persist_directory=CHROMA_DB_DIR,
)

# Groq-hosted Llama 3 8B chat model that generates the final answer.
chat_model = ChatGroq(api_key=GROQ_API_KEY, model_name="llama3-8b-8192")
# --- Streamlit App -----------------------------------------------------------
st.title("Customer Decision Hub Assistant")
st.write("Ask a question based on the CDH Documentation.")

# The prompt template is constant, so build it once at import time instead of
# re-creating it on every query.
PROMPT_TEMPLATE = """
Answer the question based only on the following context:
{context}
Answer the question based on the above context: {question}.
Provide a detailed answer.
Don’t justify your answers.
Don’t give information not mentioned in the CONTEXT INFORMATION.
Do not say "according to the context" or "mentioned in the context" or similar.
"""
prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)

# Input Query
query = st.text_input("Enter your question:")

if query:
    with st.spinner("Retrieving context and generating an answer..."):
        # Retrieve the 4 most similar document chunks (with scores) from
        # ChromaDB and join their text into one context string.
        docs_chroma = db.similarity_search_with_score(query, k=4)
        context_text = "\n\n".join(doc.page_content for doc, _score in docs_chroma)

        # Fill the template with the retrieved context and ask the chat model.
        prompt = prompt_template.format(context=context_text, question=query)
        response_text = chat_model.invoke(prompt).content

    # Display Answer
    st.subheader("Answer:")
    st.write(response_text)