import os

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.pinecone import Pinecone
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain_core.prompts import PromptTemplate
import pinecone
import chainlit as cl
from cleanlab_studio import Studio

# Connect to Pinecone (classic v2 client) with credentials from the environment.
pinecone.init(
    api_key=os.environ.get("PINECONE_API_KEY"),
    environment=os.environ.get("PINECONE_ENV"),
)

# Cleanlab Studio client; the TLM scores how trustworthy an LLM answer is.
studio = Studio(os.getenv("CLEANLAB_API_KEY"))
tlm = studio.TLM(quality_preset="high")

index_name = "tracker"
embeddings = OpenAIEmbeddings()

welcome_message = (
    "Welcome to the Transparency Tracker! "
    "Ask me any question related to Anti-Corruption."
)


@cl.on_chat_start
async def start():
    """Build a ConversationalRetrievalChain for each new chat session."""
    await cl.Message(content=welcome_message, disable_human_feedback=True).send()

    # Vector store backed by the existing "tracker" Pinecone index.
    docsearch = Pinecone.from_existing_index(
        index_name=index_name, embedding=embeddings
    )

    # Conversation memory so follow-up questions keep their context.
    message_history = ChatMessageHistory()
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        chat_memory=message_history,
        return_messages=True,
    )

    # The system prompt lives in a separate file; it must expose the
    # "context" and "question" input variables the chain fills in.
    with open("./prompt.txt", "r") as f:
        template = f.read()
    prompt = PromptTemplate(
        input_variables=["context", "question"], template=template
    )

    chain = ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
        chain_type="stuff",
        # Retrieve at most the three documents with the highest similarity score.
        retriever=docsearch.as_retriever(search_kwargs={"k": 3}),
        memory=memory,
        return_source_documents=True,
        combine_docs_chain_kwargs={"prompt": prompt},
    )

    cl.user_session.set("chain", chain)


@cl.action_callback("eval_button")
async def evaluate_response(action):
    """Score the question/answer pair with Cleanlab TLM when the button is clicked."""
    await action.remove()
    # The action value packs "question|||answer"; split it back apart.
    question, answer = action.value.split("|||", 1)
    confidence_score = tlm.get_confidence_score(question, response=answer)
    await cl.Message(
        content=f"Confidence Score: {confidence_score}",
        disable_human_feedback=True,
    ).send()


@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")
    cb = cl.AsyncLangchainCallbackHandler()

    res = await chain.acall(message.content, callbacks=[cb])
    answer = res["answer"]
    source_documents = res["source_documents"]

    # Attach the retrieved chunks as side-panel text elements.
    text_elements = []
    if source_documents:
        for source_idx, source_doc in enumerate(source_documents):
            source_name = f"source_{source_idx}"
            text_elements.append(
                cl.Text(content=source_doc.page_content, name=source_name)
            )
        source_names = [text_el.name for text_el in text_elements]
        if source_names:
            answer += f"\nSources: {', '.join(source_names)}"
        else:
            answer += "\nNo sources found"

    # Button that lets the user request a TLM trustworthiness score on demand.
    actions = [
        cl.Action(
            name="eval_button",
            value=f"{message.content}|||{answer}",
            label="Evaluate with CleanLab",
            description="Evaluate with CleanLab TLM (*may take a moment*)",
        )
    ]

    await cl.Message(content=answer, elements=text_elements, actions=actions).send()
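
# ---------------------------------------------------------------------------
# ./prompt.txt is not shown in the original. Whatever it contains, it must
# expose the two placeholders the chain fills in: {context} and {question}.
# A minimal illustrative sketch of the file (an assumption, not the original):
#
#   You are the Transparency Tracker, an assistant for anti-corruption
#   questions. Answer using only the context below. If the answer is not
#   in the context, say you don't know.
#
#   Context: {context}
#
#   Question: {question}
#
#   Answer:
# ---------------------------------------------------------------------------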
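# ---------------------------------------------------------------------------
# The app assumes the "tracker" Pinecone index already exists and is populated.
# A minimal one-off ingestion sketch, run separately from this app (the corpus
# path and chunking parameters below are illustrative assumptions):
#
#   from langchain.document_loaders import TextLoader
#   from langchain.text_splitter import RecursiveCharacterTextSplitter
#
#   docs = TextLoader("anti_corruption_docs.txt").load()  # hypothetical corpus
#   chunks = RecursiveCharacterTextSplitter(
#       chunk_size=1000, chunk_overlap=100
#   ).split_documents(docs)
#   Pinecone.from_documents(chunks, embeddings, index_name=index_name)
# ---------------------------------------------------------------------------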
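# Run locally (assuming this file is saved as app.py):
#   chainlit run app.py -w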