# coding=utf8
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import openai
import gradio as gr
import random
import time
import sys
import os
from transformers import pipeline

# Speech-recognition pipeline used by transcribe() below.
p = pipeline("automatic-speech-recognition")

# API key used by gpt_index/langchain and the ChatCompletion call
# (consider loading it from the environment instead of hard-coding it).
os.environ["OPENAI_API_KEY"] = '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'

css = """
#component-2 { position: absolute; bottom: 0; width: 100%; }
.app.svelte-ac4rv4>.main.svelte-ac4rv4 { display: flex; flex-grow: 1; flex-direction: column; background-image: url(https://i.ibb.co/xj8R4r3/background-vertical.png); }
div.svelte-1frtwj3 { display: inline-flex; align-items: center; }
div.float.svelte-1frtwj3 { position: absolute; opacity: 0; top: var(--block-label-margin); left: var(--block-label-margin); }
.wrap.svelte-6roggh.svelte-6roggh { padding: var(--block-padding); height: 100%; max-height: 800px; overflow-y: auto; }
.bot.svelte-6roggh.svelte-6roggh, .pending.svelte-6roggh.svelte-6roggh { border-color: var(--border-color-accent); background-color: var(--color-accent-soft); color: white; font-family: initial; font-style: italic; font: message-box; font-weight: bold; }
div.svelte-1frtwj3 { display: inline-flex; align-items: center; z-index: var(--layer-2); box-shadow: var(--block-shadow); border: var(--block-label-border-width) solid #ffffff; border-top: none; border-left: none; border-radius: var(--block-label-radius); background: #eff6ff; padding: var(--block-label-padding); pointer-events: none; color: var(--block-label-text-color); font-weight: var(--block-label-text-weight); width: 100%; line-height: var(--line-sm); }
div.svelte-awbtu4 { display: flex; flex-direction: inherit; flex-wrap: wrap; gap: var(--form-gap-width); box-shadow: var(--block-shadow); border: var(--block-border-width) solid #5f0000; border-radius: var(--radius-lg); background: #ffffff; overflow: hidden; position: fixed; bottom: 0; margin-left: -16px; }
img.svelte-ms5bsk { width: var(--size-full); height: 90px; object-fit: contain; }
.app.svelte-ac4rv4.svelte-ac4rv4 { max-width: none; background-color: #ffffff; }
.wrap.svelte-1o68geq.svelte-1o68geq { max-height: none }
.block.svelte-mppz8v { position: relative; margin: 0; box-shadow: var(--block-shadow); border-width: var(--block-border-width); border-color: white; border-radius: var(--block-radius); background: white; width: 100%; line-height: var(--line-sm); }
div.bot.svelte-6roggh.svelte-6roggh { background: #D9A13D; }
div.bot.svelte-17nzccn.svelte-17nzccn { background: #D9A13D; }
div.user.svelte-6roggh.svelte-6roggh { background: #5F0000; color: white; }
div.user.svelte-17nzccn.svelte-17nzccn { background: #5F0000; }
"""


def transcribe(audio):
    """Transcribe a recorded audio file to text with the Hugging Face ASR pipeline."""
    text = p(audio)["text"]
    return text


def construct_index(directory_path):
    # Prompt window and chunking parameters sized for a davinci-class completion model.
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="text-davinci-003", max_tokens=num_outputs))

    # Note: prompt_helper and llm_predictor are built but never handed to the index,
    # so GPTSimpleVectorIndex runs with its default settings.
    documents = SimpleDirectoryReader(directory_path).load_data()
    index = GPTSimpleVectorIndex.from_documents(documents)
    index.save_to_disk('index.json')

    return index


def chatbot(input_text):
    """Answer a question against the saved vector index (not wired into the UI below)."""
    index = GPTSimpleVectorIndex.load_from_disk('index.json')
    response = index.query(input_text)
    return str(response.response)

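# --- Illustrative helper (not called anywhere in the app) ---
# A minimal sketch showing how the two index helpers above could be smoke-tested from a
# Python shell before launching the UI; the function name and the default question are
# assumptions for illustration only.
def smoke_test_index(question="What services are described in the indexed documents?"):
    construct_index("docs")   # reads ./docs, builds the vector index and writes index.json
    return chatbot(question)  # loads index.json and returns the answer for the question
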
def chat(input_text, MaxToken=50, outputs=3):
    # Chat-style requests go through the Chat Completion endpoint. Models usable with this
    # endpoint include gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314,
    # gpt-3.5-turbo and gpt-3.5-turbo-0301.
    MSGS = [
        {"role": "user", "content": input_text},
    ]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=MSGS,
        # Maximum number of tokens generated by the model
        # (the maximum value is 4096 tokens for "gpt-3.5-turbo").
        max_tokens=MaxToken,
        # Number of output variations to be generated by the model.
        n=outputs,
    )
    return str(response.choices[0].message.content)


with gr.Blocks(css=css) as demo:
    realPath = str(os.path.dirname(os.path.realpath(__file__)))
    img1 = gr.Image("images/header.png", elem_classes=".img.svelte-ms5bsk",
                    elem_id="img.svelte-ms5bsk").style(container=False)
    gpt = gr.Chatbot(label=".", elem_classes=".wrap.svelte-1o68geq.svelte-1o68geq",
                     elem_id="chatbot").style(container=True)
    # Placeholder: "Welcome to the OM30 virtual assistant, how can I help you?"
    msg = gr.Textbox(elem_id="div.svelte-awbtu4", elem_classes="textBoxBot", show_label=False,
                     placeholder="Bem-vindo ao assistente virtual OM30, em que posso ajudá-lo?",
                     ).style(container=False)
    # clear = gr.Button("Limpar Conversa")  # "Clear Conversation"
    # Label: "HAVING TROUBLE TYPING? CLICK AND TELL ME WHAT YOU WANT"
    gr.Audio(source="microphone", type="filepath",
             label="ESTÁ COM DIFICULDADES EM ESCREVER? CLIQUE E ME DIGA O QUE DESEJA")

    def respond(message, chat_history):
        # Answer with the ChatCompletion helper chat(); the index-based chatbot() is not used here.
        chat_history.append((message, chat(message)))
        time.sleep(1)
        vetor = []
        realPath = str(os.path.dirname(os.path.realpath(__file__)))
        #if str(message).upper() == "OLA" or str(message).upper() == "OLÁ" or str(message).upper() == "OI":
        #    vetor = vetor + [((realPath + "\\images\\hippo-apresentacao.mp4",), "")]
        #elif str(message).upper() == "VINHO CASA DEL RONCO PINOT GRIGIO":
        #    vetor = vetor + [((realPath + "\\images\\casa-del-ronco-branco.png",), "")]
        #elif str(message).upper() == "SURVIVOR CHENIN BLANC":
        #    vetor = vetor + [((realPath + "\\images\\survivor-branco.png",), "")]
        #    vetor = vetor + [((realPath + "\\images\\survivor.mp4",), "")]
        #elif str(message).upper() == "VINHO PORTO NOVA VERDE":
        #    vetor = vetor + [((realPath + "\\images\\porta-nova-branco.jpg",), "")]
        #    vetor = vetor + [((realPath + "\\images\\porta-nova-verde.mp4",), "")]
        #elif str(message).upper() == "VINHO QUINTA DO PINTO ARINTO BRANCO":
        #    vetor = vetor + [((realPath + "\\images\\quinta-pinto-arinto-branco.png",), "")]
        #elif str(message).upper() == "VINHO 1492 CHARDONNAY":
        #    vetor = vetor + [((realPath + "\\images\\chardonay-branco.jpg",), "")]
        #elif str(message).upper() == "ME SUGIRA UM VINHO TINTO BOM COM QUEIJO":
        #    vetor = vetor + [((realPath + "\\images\\TNT-CABERNET.png",), "")]
        #    vetor = vetor + [((realPath + "\\images\\vinho-queijo.mp4",), "")]
        #elif str(message).upper() == "VINHO BOM COM CHOCOLATE":
        #    vetor = vetor + [((realPath + "\\images\\TNT-CABERNET.png",), "")]
        #elif str(message).upper() == "VINHO BOM COM PEIXE":
        #    vetor = vetor + [((realPath + "\\images\\luson-branco.png",), "")]
        #    vetor = vetor + [((realPath + "\\images\\vinho-peixe.mp4",), "")]
        #elif str(message).upper() == "VINHAS DO LASSO COLHEITA SELECIONADA":
        #    vetor = vetor + [((realPath + "\\images\\lasso-colheita-rose.png",), "")]
        #elif str(message).upper() == "DOM CAMPOS MOSCATEL":
        #    vetor = vetor + [((realPath + "\\images\\dom-campos-rose.png",), "")]
        #elif str(message).upper() == "BECAS ROSE MEIO SECO":
        #    vetor = vetor + [((realPath + "\\images\\becas-rose.png",), "")]
        #elif str(message).upper() == "PORTA DA RAVESSA":
        #    vetor = vetor + [((realPath + "\\images\\luson-branco.png",), "")]
        return "", chat_history + vetor

    # clear.click(lambda: None, None, gpt, queue=False)
    msg.submit(respond, [msg, gpt], [msg, gpt])

index = construct_index("docs")
demo.launch()