cd2-ai-hippo / app.py
# coding=utf8
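# Gradio chatbot for Hippo Supermercados: builds a llama_index vector index
# over the local "docs" folder and answers questions with GPT-3.5.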
from llama_index import load_index_from_storage, SimpleDirectoryReader, GPTVectorStoreIndex, StorageContext, ServiceContext, LLMPredictor, PromptHelper
from langchain import OpenAI
import gradio as gr
import time
import os
from transformers import pipeline
# Default ASR pipeline from transformers (used by transcribe() below).
p = pipeline("automatic-speech-recognition")
# Fail fast with a KeyError if the API key is missing; llama_index and
# langchain read OPENAI_API_KEY from the environment.
os.environ["OPENAI_API_KEY"]
css = """
#component-2 {position: absolute; bottom: 0; width: 100%;
}
.app.svelte-ac4rv4>.main.svelte-ac4rv4 {
display: flex;
flex-grow: 1;
flex-direction: column;
background-image: url(https://i.ibb.co/xj8R4r3/background-vertical.png);
}
div.float.svelte-1frtwj3 {
position: absolute;
opacity: 0;
top: var(--block-label-margin);
left: var(--block-label-margin);}
.wrap.svelte-6roggh.svelte-6roggh {
padding: var(--block-padding);
height: 100%;
max-height: 800px;
overflow-y: auto;
}
.bot.svelte-6roggh.svelte-6roggh, .pending.svelte-6roggh.svelte-6roggh {
border-color: var(--border-color-accent);
background-color: var(--color-accent-soft);
color: white;
font-family: initial;
font-style: italic;
font: message-box;
font-weight: bold;
}
div.svelte-1frtwj3 {
display: inline-flex;
align-items: center;
z-index: var(--layer-2);
box-shadow: var(--block-shadow);
border: var(--block-label-border-width) solid #ffffff;
border-top: none;
border-left: none;
border-radius: var(--block-label-radius);
background: #eff6ff;
padding: var(--block-label-padding);
pointer-events: none;
color: var(--block-label-text-color);
font-weight: var(--block-label-text-weight);
width: 100%;
line-height: var(--line-sm);
}
div.svelte-awbtu4 {
display: flex;
flex-direction: inherit;
flex-wrap: wrap;
gap: var(--form-gap-width);
box-shadow: var(--block-shadow);
border: var(--block-border-width) solid #5f0000;
border-radius: var(--radius-lg);
background: #ffffff;
overflow: hidden;
position: fixed;
bottom: 0;
margin-left: -16px;
}
img.svelte-ms5bsk {
width: var(--size-full);
height: 90px;
object-fit: contain;
}
.app.svelte-ac4rv4.svelte-ac4rv4 {
max-width: none;
background-color: #ffffff;
}
.wrap.svelte-1o68geq.svelte-1o68geq {max-height: none}
.block.svelte-mppz8v {
position: relative;
margin: 0;
box-shadow: var(--block-shadow);
border-width: var(--block-border-width);
border-color: white;
border-radius: var(--block-radius);
background: white;
width: 100%;
line-height: var(--line-sm);
}
div.bot.svelte-6roggh.svelte-6roggh {
background: #D9A13D;
}
div.bot.svelte-17nzccn.svelte-17nzccn {
background: #D9A13D;
}
div.user.svelte-6roggh.svelte-6roggh {
background: #5F0000;
color: white;
}
div.user.svelte-17nzccn.svelte-17nzccn {
background: #5F0000;
}
"""
def transcribe(audio):
    # Run speech-to-text on the recorded audio file and return the transcript.
    text = p(audio)["text"]
    return text
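# transcribe() is only reachable if the commented-out gr.Audio microphone
# input further down is re-enabled.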
def construct_index(directory_path):
    # Upper bound on completion tokens requested from the LLM.
    num_outputs = 2000
    prompt_helper = PromptHelper(context_window=3900, num_output=256, max_chunk_overlap=20, chunk_size_limit=1024)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="gpt-3.5-turbo-16k", max_tokens=num_outputs))
    documents = SimpleDirectoryReader(directory_path).load_data()
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    # The prompt_helper already travels inside service_context, so it is not
    # passed to from_documents() a second time.
    index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
    # Despite the name, 'index.json' is used as a persistence *directory*.
    index.storage_context.persist(persist_dir='index.json')
    return index
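# chatbot() below reloads the persisted index from the same persist_dir, so
# queries always hit the most recently built index.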
def chatbot(input_text):
    num_outputs = 4097
    prompt_helper = PromptHelper(context_window=3900, num_output=256, max_chunk_overlap=20, chunk_size_limit=1024)
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="gpt-3.5-turbo-16k", max_tokens=num_outputs))
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
    storage_context = StorageContext.from_defaults(persist_dir='index.json')
    # Load the index persisted by construct_index().
    index = load_index_from_storage(storage_context)
    query_engine = index.as_query_engine(service_context=service_context, verbose=True, response_mode="compact")
    response = query_engine.query(input_text)
    return str(response.response)
with gr.Blocks(css=css) as demo:
    realPath = str(os.path.dirname(os.path.realpath(__file__)))
    img1 = gr.Image("images/1024x150_cabeçalho.hippo.png", elem_classes=".img.svelte-ms5bsk", elem_id="img.svelte-ms5bsk").style(container=False)
    gpt = gr.Chatbot(label=".", elem_classes=".wrap.svelte-1o68geq.svelte-1o68geq", elem_id="chatbot").style(container=True)
    msg = gr.Textbox(elem_id="div.svelte-awbtu4", elem_classes="textBoxBot", show_label=False,
                     placeholder="Welcome to Hippo Supermercados, how can I help you?",
                     ).style(container=False)
    # clear = gr.Button("Clear Conversation")
    # gr.Audio(source="microphone", type="filepath", label="HAVING TROUBLE TYPING? CLICK AND TELL ME WHAT YOU WANT")
    def respond(message, chat_history):
        # Query the index and append the (user, bot) turn to the chat history.
        chat_history.append((message, chatbot(message)))
        time.sleep(1)
        return "", chat_history
    # clear.click(lambda: None, None, gpt, queue=False)
    msg.submit(respond, [msg, gpt], [msg, gpt])
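    # A minimal sketch (an untested assumption, not part of the original app)
    # of how transcribe() could feed the textbox if the gr.Audio input above
    # were re-enabled and bound to a variable named `audio`:
    # audio.change(transcribe, inputs=audio, outputs=msg)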
# Rebuild and persist the index from the "docs" folder on every startup,
# then serve the UI.
index = construct_index("docs")
demo.launch()