|
import gradio as gr |
|
import os |
|
from langchain.vectorstores import Chroma |
|
from langchain.embeddings import CohereEmbeddings |
|
from langchain.chat_models import ChatOpenAI |
|
from langchain.chains import ( |
|
StuffDocumentsChain, LLMChain |
|
) |
|
from langchain.schema import HumanMessage, AIMessage |
|
from langchain.prompts import PromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate, MessagesPlaceholder |
|
from langchain.callbacks.manager import ( |
|
trace_as_chain_group, |
|
) |
|
|
|
|
|
# API keys surfaced here for visibility. Note they are never passed
# explicitly below: the LangChain clients read these same environment
# variables themselves when constructed.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

COHERE_API_KEY = os.getenv("COHERE_API_KEY")
|
|
|
|
|
|
|
# Embedding model for the vector store. No key is passed explicitly;
# presumably CohereEmbeddings reads COHERE_API_KEY from the environment
# (see constants above) -- TODO confirm against the installed version.
embeddings = CohereEmbeddings()

# Local Chroma vector store persisted in the ./chroma directory.
# NOTE(review): assumes "chroma" was already populated by a separate
# ingestion step; nothing in this file adds documents to it -- confirm.
vectorstore = Chroma(embedding_function=embeddings, persist_directory="chroma")

# Retriever interface over the store, used by qa_response() below.
retriever = vectorstore.as_retriever()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Each retrieved document is rendered as its raw page_content when stuffed
# into the {context} slot of the system prompt below.
document_prompt = PromptTemplate(
    input_variables=["page_content"],
    template="{page_content}"
)

# Name of the prompt variable that receives the joined documents.
document_variable_name = "context"

# Answering model; temperature=0 to keep the structured output stable.
llm = ChatOpenAI(temperature=0, model_name='gpt-4')

# System prompt describing the review-analysis + response format.
# NOTE(review): the stray 'π' characters look like mojibake for emoji
# (thumbs up/down etc.) -- confirm the intended glyphs before shipping.
prompt_template = """First, Rate the review and if the label is negative as π or positive as π or neutral as π. Output as Rating:

On another line, provide only three keywords of the review. Output as Keywords:

On another line, determine if an Employee's name is used and if so please display otherwise display NO NAME GIVEN. Do not use the person's name in your response if you are uncertain if they are an employee of this organization.

On another line, determine what market it the review is for. For example, real estate, HVAC, roofing, etc. if you don't known display MARKET NOT KNOWN. Output as Market:

On another line, provide up to five more keywords based on services found in the review. Output as Services Keywords:

On another line, Analyze their writing style and tell me their education level and general age based on their review. Also list a % of confidence below your answer.

Secondly, Write the response in the the education level and age level you found. Be genuine, sincere and helpful in your tone and response. Use the person's first name if known. Thank them if positive or apologize an be helpful if negative. Use all the service keywords you found in the response and bold with markdown. Response in a minimum of 500 characters and maximum of 4096 characters. Write it as a response to a customer review not a letter, do not start with dear person's first name and/or hello or any other salutation. Output as Response:

On another line, display the number of characters in the response. Output as Character count:

Finally, display this information in JSON in code. Output as JSON:




--------------


{context}"""

system_prompt = SystemMessagePromptTemplate.from_template(prompt_template)

# BUG FIX: qa_response() passes `chat_history` (a list of Human/AI message
# objects) into this chain, but the original prompt had no slot for it, so
# the conversation history was silently dropped. MessagesPlaceholder
# (already imported at the top of the file, previously unused) threads those
# messages in between the system instructions and the user's question.
prompt = ChatPromptTemplate(
    messages=[
        system_prompt,
        MessagesPlaceholder(variable_name="chat_history"),
        HumanMessagePromptTemplate.from_template("{question}")
    ]
)

llm_chain = LLMChain(llm=llm, prompt=prompt)

# "Stuff" strategy: concatenate all retrieved documents, separated by
# "---------", into the single {context} slot of the prompt above.
combine_docs_chain = StuffDocumentsChain(
    llm_chain=llm_chain,
    document_prompt=document_prompt,
    document_variable_name=document_variable_name,
    document_separator="---------"
)
|
|
|
|
|
|
|
|
|
|
|
# Prompt for condensing the running conversation plus the latest user
# question into a single standalone search query for the retriever.
template = """Combine the chat history and follow up question into a a search query.



Chat History:



{chat_history}



Follow up question: {question}

"""

# NOTE(review): this rebinds the module-level names `prompt` and `llm` used
# by the answering chain above; `llm_chain` already holds its own references,
# so the rebinding is harmless here, but distinct names would be clearer.
prompt = PromptTemplate.from_template(template)

# Uses ChatOpenAI's default model (no model_name given), unlike the gpt-4
# answering model configured above.
llm = ChatOpenAI(temperature=0)

# Chain invoked by qa_response() to produce the retrieval search query.
question_generator_chain = LLMChain(llm=llm, prompt=prompt)
|
|
|
|
|
|
|
|
|
def qa_response(message, history):
    """Gradio ChatInterface callback: answer `message` using retrieved docs.

    `history` is a list of (human, assistant) string pairs as supplied by
    gr.ChatInterface. Returns the combine chain's text response.
    """
    # Plain-text transcript of the conversation for the condensing prompt.
    transcript = "\n\n".join(
        f"Human: {human}\nAssistant: {assistant}" for human, assistant in history
    )

    # The same history as LangChain message objects, passed to the
    # document-combining chain as `chat_history`.
    past_messages = []
    for human, assistant in history:
        past_messages.extend(
            (HumanMessage(content=human), AIMessage(content=assistant))
        )

    # Group all three steps under one trace for observability.
    with trace_as_chain_group("qa_response") as group:
        # 1. Condense history + follow-up into a standalone search query.
        search_query = question_generator_chain.run(
            question=message,
            chat_history=transcript,
            callbacks=group
        )

        # 2. Fetch the documents relevant to that query.
        docs = retriever.get_relevant_documents(search_query, callbacks=group)

        # 3. Answer with the documents stuffed into the prompt.
        return combine_docs_chain.run(
            input_documents=docs,
            chat_history=past_messages,
            question=message,
            callbacks=group
        )
|
|
|
|
|
|
|
gr.ChatInterface(qa_response).launch() |
|
|