import os
import gradio as gr
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_core.documents import Document

# βœ… Load OpenAI API Key
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# βœ… Initialize OpenAI Model with LangChain
model = ChatOpenAI(
    model="gpt-4o-mini", 
    openai_api_key=api_key
)

# βœ… Initialize HuggingFace Embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# βœ… Initialize Chroma Vector Store
vector_store = Chroma(
    collection_name="chat_collection",  # Specify the collection name
    embedding_function=embeddings,
    persist_directory="/tmp/chroma_db",  # Local persistence (note: /tmp is typically cleared on reboot)
)

# βœ… Step 1: Helper Functions for Chat Memory
def get_chat_history(user_id):
    """Fetches stored messages for a given user from the vector store."""
    retriever = vector_store.as_retriever(
        search_type="mmr",
        search_kwargs={
            "k": 100,
            "fetch_k": 100,
            # The metadata filter belongs in search_kwargs so it is applied
            # on every retrieval, scoping results to this user's documents.
            "filter": {"user_id": user_id},
        },
    )

    results = retriever.invoke("Chat history")

    if not results:  # No stored messages for this user yet
        return ""

    # Extract the page content (chat messages) from the results
    user_history = [doc.page_content for doc in results]
    return "\n".join(user_history)

def store_chat_message(user_id, user_input, bot_response):
    """Stores user-bot conversations in ChromaDB."""
    chat_entry = f"User: {user_input}\nBot: {bot_response}"
    # Add to vector store with user_id as metadata
    vector_store.add_documents([Document(page_content=chat_entry, metadata={"user_id": user_id})])
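
# Optional debugging aid (a sketch, not wired into the UI): Chroma.get accepts
# a `where` metadata filter, so you can inspect everything stored for one user
# without going through embedding search. The helper name is hypothetical.
def dump_user_history(user_id):
    """Prints all stored chat entries for `user_id` straight from ChromaDB."""
    records = vector_store.get(where={"user_id": user_id})
    for entry in records.get("documents", []):
        print(entry)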

# βœ… Step 2: Generate Response Using OpenAI GPT
def generate_response(username, user_input):
    """Generates a chatbot response using GPT-4 and stores chat history."""
    user_id = username.lower().strip()
    history = get_chat_history(user_id)
    messages = [{"role": "system", "content": "You are a helpful AI assistant. Please provide answer in 20 words only"}]
    
    if history:
        messages.append({"role": "user", "content": f"Chat history:\n{history}"})
    
    messages.append({"role": "user", "content": user_input})
    
    # Generate the response
    response = model.invoke(messages)
    bot_response = response.content
    
    # Store the conversation for future reference
    store_chat_message(user_id, user_input, bot_response)
    
    # Return the entire conversation including the user's input and bot's response
    return f"{history}\nUser: {user_input}\nBot: {bot_response}"

# βœ… Step 3: Gradio UI with User Dropdown
with gr.Blocks() as demo:
    gr.Markdown("# πŸ”₯ Multi-User Chatbot with GPT-4 and Memory (ChromaDB)")
    
    # Dropdown for selecting user
    username_input = gr.Dropdown(
        label="Select User",
        choices=["Aarya", "Ved", "Vivaan"],
        value="Aarya",  # Default selection so username is never None
    )

    # Chat input and output
    chat_input = gr.Textbox(label="Your Message", placeholder="Type here...")
    chat_output = gr.Textbox(label="Chatbot Response", interactive=False)

    # Button to send the message
    chat_button = gr.Button("Send")
    chat_button.click(generate_response, inputs=[username_input, chat_input], outputs=chat_output)

# βœ… Step 4: Run the Gradio app
if __name__ == "__main__":
    demo.launch()
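
# Deployment note (a hint, not part of the original app): launch() serves on
# 127.0.0.1:7860 by default; standard Gradio options expose it more widely:
#
#     demo.launch(server_name="0.0.0.0", server_port=7860)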