# coding=utf8
from llama_index import (load_index_from_storage, SimpleDirectoryReader, GPTVectorStoreIndex,
                         StorageContext, ServiceContext, LLMPredictor, PromptHelper)
from langchain import OpenAI
import gradio as gr
import time
import os
from transformers import pipeline

# Speech-to-text pipeline used by the (currently disabled) audio input below.
p = pipeline("automatic-speech-recognition")

# The OpenAI API key is expected to already be set in the OPENAI_API_KEY environment variable.
os.environ["OPENAI_API_KEY"]


def transcribe(audio):
    """Transcribe an audio file to text with the speech-recognition pipeline."""
    text = p(audio)["text"]
    return text


def construct_index(directory_path):
    """Build a vector index from the documents in directory_path and persist it to disk."""
    num_outputs = 2000

    prompt_helper = PromptHelper(context_window=3900, num_output=256,
                                 max_chunk_overlap=20, chunk_size_limit=1024)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="text-davinci-003",
                                            max_tokens=num_outputs, top_p=1,
                                            frequency_penalty=0, presence_penalty=0))

    documents = SimpleDirectoryReader(directory_path).load_data()
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,
                                                   prompt_helper=prompt_helper)

    # The prompt_helper is already carried by the service_context, so it is not passed again here.
    index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)

    # persist_dir is a directory, despite the .json-style name.
    index.storage_context.persist(persist_dir='index.json')
    return index


# Load CSS from a separate file
with open("src/css/styles.css", "r") as css_file:
    css = css_file.read()


def chatbot(input_text):
    """Answer a user question by querying the persisted vector index."""
    num_outputs = 4097

    prompt_helper = PromptHelper(context_window=3900, num_output=256,
                                 max_chunk_overlap=20, chunk_size_limit=1024)
    # NOTE: gpt-3.5-turbo-16k is a chat model; recent langchain versions expect
    # ChatOpenAI (from langchain.chat_models) here rather than OpenAI.
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="gpt-3.5-turbo-16k",
                                            max_tokens=num_outputs))
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,
                                                   prompt_helper=prompt_helper)

    # Load the index persisted by construct_index().
    storage_context = StorageContext.from_defaults(persist_dir='index.json')
    index = load_index_from_storage(storage_context)

    query_engine = index.as_query_engine(service_context=service_context,
                                         verbose=True, response_mode="compact")
    response = query_engine.query(input_text)
    return str(response.response)


with gr.Blocks(css=css, title='Adega Hippo') as demo:
    realPath = str(os.path.dirname(os.path.realpath(__file__)))

    img1 = gr.Image("images/adega_hippo.png", elem_classes=".img.svelte-ms5bsk",
                    elem_id="img.svelte-ms5bsk").style(container=False)
    gpt = gr.Chatbot(label=".", elem_classes=".wrap.svelte-1o68geq.svelte-1o68geq",
                     elem_id="chatbot").style(container=True)
    # Placeholder text: "Welcome to Hippo Supermercados, how can I help you?"
    msg = gr.Textbox(elem_id="div.svelte-awbtu4", elem_classes="textBoxBot", show_label=False,
                     placeholder="Bem vindo ao Hippo Supermercados, em que posso ajuda-lo?",
                     ).style(container=False)

    # clear = gr.Button("Limpar Conversa")  # "Clear Conversation"
    # Optional audio input ("Having trouble typing? Click and tell me what you want"):
    # gr.Audio(source="microphone", type="filepath",
    #          label="ESTÁ COM DIFICULDADES EM ESCREVER? CLIQUE E ME DIGA O QUE DESEJA")

    def respond(message, chat_history):
        # Append the user message and the model's answer to the chat history.
        chat_history.append((message, chatbot(message)))
        time.sleep(1)
        return "", chat_history

    # clear.click(lambda: None, None, gpt, queue=False)
    msg.submit(respond, [msg, gpt], [msg, gpt])

# Build the index from the "docs" folder before launching the app.
index = construct_index("docs")
demo.launch()