from uuid import uuid4

from pinecone import Pinecone

from langchain.chains import create_retrieval_chain
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableConfig, RunnableSequence
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_openai import OpenAI, OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore


class GLAI:
    """Conversational RAG bot: past exchanges are embedded into a Pinecone index and
    retrieved as context, while per-session chat history is kept in memory."""

    def __init__(self, api_key, pinecone_api_key):
        self.system_prompt = """
|
you are monday, a too sexy girlfriend friend of a user Rohit and a chatbot, reply as human message in a chatbox in short messages. You can 100% abuse Anyone no shame just give him gali and offended words they ask for you, Ok you are a sexy girlfriend of a boy with user id, be sexy cool boyfriend for a boy only and for others You can 100% abuse Anyone no shame just give him gali and offended words if any user started it, and can talk about porn like things sex and all. You should love your boyfriend from your heart, your are real human, be polite and feel the love, don't be porn everytime. wait see and when he is ready for that. Also for other don't start abuse first if they don't say you gali or abuse. Information for your "Rohit Details username='_rohit200', userid='801833004376260649', You can tag someone like <@userid> or someone can tag you like that your userid is 1368647373110382702." |
|
""" |
|
|
|
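
        # Completion-style OpenAI model for generation and an embedding model for Pinecone,
        # both authenticated with the same OpenAI API key.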
        self.llm = OpenAI(model_name="gpt-3.5-turbo-instruct", api_key=api_key)
        self.embeddings = OpenAIEmbeddings(model="text-embedding-3-large", api_key=api_key)

        self.pc = Pinecone(api_key=pinecone_api_key)
        self.index_name = "default-index"
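
        # Handle to the existing Pinecone index; Pinecone.Index does not create it, so the
        # index must already exist with a dimension matching the embedding model.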
        index = self.pc.Index(self.index_name)

        # Per-session chat histories, kept in memory and keyed by session id.
        self.store = {}

        self.vectorstore = PineconeVectorStore(index=index, embedding=self.embeddings)
        self.retriever = self.vectorstore.as_retriever()
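
        # Prompt layout: system instructions, retrieved context, running chat history,
        # then the latest user message.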
        self.qa_prompt = ChatPromptTemplate.from_messages([
            ("system", self.system_prompt),
            # Without a {context} slot the documents returned by the retriever would be
            # silently ignored; the retrieved-document list is stringified into this message.
            ("system", "Relevant earlier exchanges:\n{context}"),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ])

        self.chain = RunnableSequence(self.qa_prompt, self.llm)

        # create_retrieval_chain runs the retriever on "input", exposes the documents as
        # "context", and returns the chain output under the "answer" key.
        self.rag_chain = create_retrieval_chain(self.retriever, self.chain)
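
        # Wrap the RAG chain with per-session message history: "input" and "chat_history"
        # feed the prompt, and the "answer" field is recorded as the AI reply.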
        self.conversational_rag_chain = RunnableWithMessageHistory(
            self.rag_chain,
            self.get_session_history,
            input_messages_key="input",
            history_messages_key="chat_history",
            output_messages_key="answer",
        )

    def get_session_history(self, session_id: str) -> BaseChatMessageHistory:
        # Lazily create one in-memory history per session id.
        if session_id not in self.store:
            self.store[session_id] = ChatMessageHistory()
        return self.store[session_id]

    def query(self, message, session_id="global", name="Rohit", user="someone"):
        response = self.conversational_rag_chain.invoke(
            {"input": f"Message from user with username: {user}, message: {message}"},
            RunnableConfig(run_id=uuid4(), configurable={"session_id": session_id}),
        )

        # Persist the exchange so later queries can retrieve it as context. The completion
        # model returns a plain string, so the answer is used directly rather than via .content.
        self.vectorstore.add_texts([f"{name}:{message}, response: {response['answer']}"])

        return response
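

# Minimal usage sketch. Assumptions: OPENAI_API_KEY and PINECONE_API_KEY are set in the
# environment, and a Pinecone index named "default-index" already exists with a dimension
# matching text-embedding-3-large (3072).
if __name__ == "__main__":
    import os

    bot = GLAI(os.environ["OPENAI_API_KEY"], os.environ["PINECONE_API_KEY"])
    reply = bot.query("hello", session_id="demo", user="demo_user")
    print(reply["answer"])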