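"""Async conversation handler built on ChatGroq with summary-based memory."""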
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_groq import ChatGroq
from langchain.memory import ConversationSummaryMemory
from dotenv import load_dotenv

from src.services.prompts import ASSISTANT_PROMPT

# Load GROQ_API_KEY from a local .env file; ChatGroq reads it from the environment.
load_dotenv()

class ConversationHandler:
    """Chat wrapper around a Groq-hosted model with summarizing memory."""

    def __init__(self, model_name="llama-3.3-70b-versatile", temperature=0.7):
        self.chat_model = ChatGroq(
            model_name=model_name,
            temperature=temperature,
        )
        # The prompt must expose every variable the chain is invoked with:
        # the running summary ("chat_history") and the new turn ("user_query").
        # This assumes ASSISTANT_PROMPT is a plain system instruction with no
        # input variables of its own.
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", ASSISTANT_PROMPT),
            MessagesPlaceholder(variable_name="chat_history"),
            ("human", "{user_query}"),
        ])
        # Maintain an LLM-generated running summary of the conversation
        # instead of storing the full transcript in the prompt.
        self.memory = ConversationSummaryMemory(
            llm=self.chat_model,
            max_token_limit=2000,
            return_messages=True,
            memory_key="chat_history",
        )
    
    async def give_response(self, user_input: str) -> str:
        chain = self.prompt | self.chat_model
        # Load the running summary so the model sees prior context.
        memory_variables = self.memory.load_memory_variables({})
        response = await chain.ainvoke(
            {
                "user_query": user_input,
                "chat_history": memory_variables["chat_history"],
            }
        )
        # Fold this exchange back into the summary for future turns.
        self.memory.save_context(
            {"input": user_input},
            {"output": response.content},
        )
        return response.content

    async def summarize_conversation(self) -> str:
        # Summarize the raw exchange; chat_memory holds the stored messages,
        # whereas load_memory_variables() would only return the rolling summary.
        return self.memory.predict_new_summary(
            messages=self.memory.chat_memory.messages,
            existing_summary="",
        )

    async def clear_memory(self):
        # ConversationSummaryMemory.clear() is synchronous; do not await it.
        self.memory.clear()

if __name__ == "__main__":
    import asyncio

    async def main():
        handler = ConversationHandler()

        # Start a conversation. give_response() returns a plain string.
        response1 = await handler.give_response(
            "What is machine learning? By the way, my name is Ravi and my hometown is Ilam."
        )
        print("Response 1:", response1)

        # Continue the conversation; the summary memory should retain earlier facts.
        response2 = await handler.give_response(
            "Can you give me an example of machine learning mathematics?"
        )
        print("Response 2:", response2)

        response3 = await handler.give_response(
            "Can you give me more examples? Also, which country am I from?"
        )
        print("Response 3:", response3)

        summary = await handler.summarize_conversation()
        print("\nConversation Summary:", summary)

    asyncio.run(main())