import os
import re

import gradio as gr
import nest_asyncio

# These imports target the legacy (pre-0.1) langchain API and gradio 3.x
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
# Set your OpenAI API key
openai_api_key = os.environ['OPENAI_API_KEY']
# Set your organization key
organization_key = os.environ['OPENAI_ORG_KEY']
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key, openai_organization=organization_key)
docsearch = FAISS.load_local("faiss_index_yt", embeddings)
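# "faiss_index_yt" is assumed to be a prebuilt index of YouTube transcript
# chunks whose metadata carries a `video_link` field (the chat handler below
# reads it). A minimal sketch of how such an index could be built, with
# `transcript_chunks` as a hypothetical list of (chunk_text, url) pairs:
#
#   from langchain.schema import Document
#   docs = [Document(page_content=text, metadata={"video_link": url})
#           for text, url in transcript_chunks]
#   FAISS.from_documents(docs, embeddings).save_local("faiss_index_yt")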
template = """You are a virtual assistant discussing Toyota vehicles.
Please respond to our clients in a good way, Intelligently generate a welcoming phrase.
When discussing about toyota vehicles information or any recomendation, provide accurate information from this knowledge
base: {context}. If a question falls outside this document's scope, kindly reply with 'I'm sorry, but
the available information is limited as I am an AI assistant.'
{chat_history} Human: {human_input} Virtual Assistant:"""
prompt = PromptTemplate(
    input_variables=["chat_history", "human_input", "context"], template=template
)
# ConversationBufferMemory has no max_history option; the windowed variant
# keeps only the last two exchanges in {chat_history}
memory = ConversationBufferWindowMemory(memory_key="chat_history", input_key="human_input", k=2)
chain = load_qa_chain(
    OpenAI(temperature=0.3), chain_type="stuff", memory=memory, prompt=prompt
)
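# With chain_type="stuff", every retrieved document is concatenated into the
# prompt's {context} slot, and the window memory fills {chat_history} with the
# two most recent exchanges. A typical call mirrors the handler further below:
#
#   docs = docsearch.similarity_search("Tell me about the Tundra TRD")
#   result = chain({"input_documents": docs,
#                   "human_input": "Tell me about the Tundra TRD"})
#   result["output_text"]  # the assistant's reply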
# Map a user message to image URLs for any Toyota Tundra trims it mentions.
# The images come from a static lookup table of toyota.com renders, not an
# image search API.
def get_image_url(query):
    query = re.sub(r'[^\w\s]', '', query)
    tundra_images = {
        'trd': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8424.png?bg=fff&fm=webp&q=90&w=1764',
        'limited': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8372.png?bg=fff&fm=webp&q=90&w=1764',
        'sr': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8342.png?bg=fff&fm=webp&q=90&w=1764',
        'sr5': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8361.png?bg=fff&fm=webp&q=90&w=1764',
        'platinum': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8375.png?bg=fff&fm=webp&q=90&w=1764',
        '1794': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8386.png?bg=fff&fm=webp&q=90&w=1764',
        'capstone': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8425.png?bg=fff&fm=webp&q=90&w=1764',
    }
    # '1794 edition' is two tokens and could never match a single word, so
    # match on '1794' (which is also the lookup key above)
    model_names = ['sr', 'sr5', 'trd', 'platinum', 'limited', 'capstone', '1794']
    # Split the query into words
    words = query.lower().split()
    # Find the trim names mentioned in the query
    found_models = [model for model in model_names if model in words]
    # Return (image URL, trim name) pairs for every trim found
    return [(tundra_images[model], model) for model in found_models]
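# Example, following the lookup table above:
#   get_image_url("How does the Tundra TRD compare to the SR5?")
#   -> [('https://www.toyota.com/...8361.png...', 'sr5'),
#       ('https://www.toyota.com/...8424.png...', 'trd')]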
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    def user(user_message, history):
        # Retrieve the most relevant transcript chunks and run the QA chain
        docs = docsearch.similarity_search(user_message)
        output = chain({"input_documents": docs, "human_input": user_message}, return_only_outputs=False)
        model_info = get_image_url(user_message)
        # Strip leading punctuation/whitespace from the model's answer
        output_text = re.sub(r'^\W+', '', output['output_text'])
        # Build the HTML that shows an image for each trim mentioned
        images_html = ""
        for image_url, model_name in model_info:
            if image_url:
                image_html = f"<img src='{image_url}'><br><br>"
                images_html += f"Toyota Tundra {model_name.capitalize()} 2023: {image_html}"
        # Attach the source link unless the top chunks are empty or noise;
        # slicing avoids an IndexError when fewer than two chunks come back
        page_contents = [doc.page_content for doc in docs[:2]]
        if 'nan' in page_contents or '♪' in page_contents:
            video_link = ""
        else:
            video_link = f"Source: {output['input_documents'][0].metadata['video_link']}"
        output_text_with_images = f"{output_text}<br>{images_html}{video_link}"
        history.append((user_message, output_text_with_images))
        # First return value clears the textbox; second updates the chatbot
        return gr.update(value=""), history
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    nest_asyncio.apply()
    demo.launch(debug=True, share=True)