|
import getpass
import hmac
import os
import re
from time import time

import gradio as gr
import numpy as np
import pandas as pd

from langchain import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain_community.embeddings import GPT4AllEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAI
|
|
|
|
|
|
|
# Make sure the OpenAI key is available before any client is built.
# The original bare `os.environ['OPENAI_API_KEY']` expression discarded the
# value and only raised an opaque KeyError when the variable was missing;
# fall back to an interactive prompt instead (getpass is imported above).
if 'OPENAI_API_KEY' not in os.environ:
    os.environ['OPENAI_API_KEY'] = getpass.getpass('Enter your OpenAI API key: ')
|
|
|
|
|
|
|
"""#### Load predefined chroma"""

# Directory where a previously built Chroma index was persisted to disk.
persist_directory = 'chromadb'

# Embedding model must match the one used when the index was built,
# otherwise similarity search returns meaningless neighbors.
embedding = OpenAIEmbeddings(model='text-embedding-3-large')

# Re-open the existing vector store; no re-indexing happens here.
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)

# Plain top-50 similarity retriever.
# NOTE(review): this retriever appears unused — call_rag goes through
# `retriever_from_llm` below, which builds its own MMR retriever. Confirm
# before removing.
retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": 50})
|
|
|
"""## MultiQueryRetriever

"""

from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain_openai import ChatOpenAI

# Deterministic chat model (temperature=0); reused both for query expansion
# here and for the generation chain further down.
llm = ChatOpenAI(model_name="gpt-4-turbo", temperature=0)

# MultiQueryRetriever asks the LLM to rephrase the user's question into
# several variants and merges the documents retrieved for each variant.
# MMR search diversifies the 50 chunks returned per query.
retriever_from_llm = MultiQueryRetriever.from_llm(
    retriever=vectordb.as_retriever(search_type="mmr",
                                    search_kwargs={"k": 50}),
    llm=llm
)
|
|
|
import logging

# Surface the alternative queries that MultiQueryRetriever generates:
# its module logger emits them at INFO level, which basicConfig's default
# root handler would otherwise swallow.
logging.basicConfig()
multi_query_logger = logging.getLogger("langchain.retrievers.multi_query")
multi_query_logger.setLevel(logging.INFO)
|
|
|
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Prompt for the email-generation step.
# BUG FIX: `input_variables` previously listed 'contexts' while the template
# body references {context}; PromptTemplate rejects that mismatch at
# construction (or leaves the variable unfilled), so the chain could not run
# as written. Also removed the stray comma after {query}, which leaked into
# the rendered prompt text.
qa_prompt = PromptTemplate(
    input_variables=['query', 'context'],
    template="""
You are a recommendation system that analyze the user's interest and
generate an email subject and body for PETCO. If the
question cannot be answered using the information provided answer
with 'I don't know'.
Context: {context}
Question: {query}
""",
)

# Pairs the prompt with the chat model configured above; invoked by
# call_rag() with the retrieved documents as {context}.
qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
|
|
|
|
|
def call_rag(question):
    """Answer *question* with retrieval-augmented generation.

    Expands the question into multiple queries (MultiQueryRetriever),
    pulls matching documents from the Chroma store, and asks the LLM to
    compose a PETCO marketing email using those documents as context.

    Args:
        question: The user's free-text prompt from the Gradio text box.

    Returns:
        The generated email text (the chain's "text" output).
    """
    # `get_relevant_documents` is deprecated in the LangChain releases this
    # file targets; `.invoke(question)` is the stable replacement with the
    # same semantics.
    docs = retriever_from_llm.invoke(question)

    out = qa_chain.invoke(
        input={
            "query": question,
            # Visible separator so the LLM can tell one retrieved passage
            # from the next.
            "context": "\n---\n".join(doc.page_content for doc in docs),
        }
    )

    return out["text"]
|
|
|
# Single-user credential store sourced from the environment.  Raises
# KeyError at import time if the "username"/"password" variables are
# missing — fails fast rather than launching an unguarded app.
user_db = {
    os.environ["username"]: os.environ["password"],
}

# Minimal Gradio UI: one text box in, generated email text out.
interface = gr.Interface(
    fn=call_rag,
    inputs="text",
    outputs="text",
    title="PETCO Email Generator Using RAG",
    description="""
Try input below example prompts in the model!

Example prompt:
\n
1. Send an email that conveys to consumers that they are able to get $2 off all online purchases above $10 on Valentines’ Day 2024. Please use the appropriate emojis for the holiday.

2. Generate a welcoming email for a new pet owner who just adopted a puppy, including a checklist of essential items they need to buy.
""",
)
|
|
|
def _check_auth(username, password):
    """Validate Gradio login credentials against the env-sourced user_db.

    Uses hmac.compare_digest so response timing does not leak how much of
    the password prefix matched (the original `user_db.get(u) == p` lambda
    was an early-exit string compare on untrusted input).
    Note: compare_digest on `str` requires ASCII-only values, which holds
    for typical environment-variable credentials.
    """
    expected = user_db.get(username)
    return expected is not None and hmac.compare_digest(expected, password)


if __name__ == "__main__":
    interface.launch(
        # Gradio calls this with the submitted (username, password) pair.
        auth=_check_auth,
        auth_message="Welcome! Please enable third party cookies or you will not be able to login."
    )
|
|
|
|
|
|