# coding=utf8
from llama_index import (
    GPTVectorStoreIndex,
    LLMPredictor,
    PromptHelper,
    ServiceContext,
    SimpleDirectoryReader,
    StorageContext,
    load_index_from_storage,
)
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
import gradio as gr
import time
import os
from transformers import pipeline

# Speech-to-text pipeline (transformers' default ASR model).
p = pipeline("automatic-speech-recognition")

# Fail fast if the OpenAI API key is missing from the environment.
if "OPENAI_API_KEY" not in os.environ:
    raise EnvironmentError("Please set the OPENAI_API_KEY environment variable.")

def transcribe(audio):
    """Transcribe an audio file to text with the ASR pipeline."""
    return p(audio)["text"]
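
# A minimal usage sketch for transcribe(); 'question.wav' is a hypothetical
# local audio file, not shipped with this project:
#   print(transcribe("question.wav"))
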
def construct_index(directory_path):
    """Build a vector index over the documents in `directory_path` and persist it."""
    num_outputs = 2000

    # Reserve `num_outputs` tokens for the completion so prompts fit the
    # model's 3,900-token context window (the original reserved only 256,
    # which could overflow the context).
    prompt_helper = PromptHelper(context_window=3900, num_output=num_outputs,
                                 max_chunk_overlap=20, chunk_size_limit=1024)

    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="text-davinci-003",
                                            max_tokens=num_outputs, top_p=1,
                                            frequency_penalty=0, presence_penalty=0))

    documents = SimpleDirectoryReader(directory_path).load_data()

    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,
                                                   prompt_helper=prompt_helper)
    index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)

    # persist_dir is a directory; the '.json' suffix is just part of its name.
    index.storage_context.persist(persist_dir='index.json')

    return index
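
# A quick smoke-test sketch (assumes a local 'docs' folder with documents):
#   idx = construct_index("docs")
#   print(idx.as_query_engine().query("What does the product catalog contain?"))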

# Load CSS from a separate file.
with open("src/css/styles.css", "r") as css_file:
    css = css_file.read()


def chatbot(input_text):
    # gpt-3.5-turbo-16k has a 16,384-token context window; reserve
    # `num_outputs` tokens for the generated answer.
    num_outputs = 4097

    prompt_helper = PromptHelper(context_window=16384, num_output=num_outputs,
                                 max_chunk_overlap=20, chunk_size_limit=1024)
    # gpt-3.5-turbo-16k is a chat model, so it needs langchain's ChatOpenAI
    # wrapper rather than the completion-style OpenAI wrapper.
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0.0, model_name="gpt-3.5-turbo-16k",
                                                max_tokens=num_outputs))
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor,
                                                   prompt_helper=prompt_helper)
    storage_context = StorageContext.from_defaults(persist_dir='index.json')

    # Load the index persisted by construct_index().
    index = load_index_from_storage(storage_context)

    query_engine = index.as_query_engine(service_context=service_context,
                                         verbose=True, response_mode="compact")
    response = query_engine.query(input_text)
    return str(response.response)
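
# Direct-call sketch (assumes the index was already persisted to 'index.json'):
#   print(chatbot("What are today's offers?"))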

with gr.Blocks(css=css, title='Adega Hippo') as demo:
    img1 = gr.Image("images/adega_hippo.png", elem_classes=".img.svelte-ms5bsk", elem_id="img.svelte-ms5bsk").style(container=False)
    gpt = gr.Chatbot(label=".", elem_classes=".wrap.svelte-1o68geq.svelte-1o68geq", elem_id="chatbot").style(container=True)
    msg = gr.Textbox(elem_id="div.svelte-awbtu4", elem_classes="textBoxBot", show_label=False,
                placeholder="Welcome to Hippo Supermercados, how can I help you?",
            ).style(container=False)
    # clear = gr.Button("Clear Conversation")
    # gr.Audio(source="microphone", type="filepath", label="HAVING TROUBLE TYPING? CLICK AND TELL ME WHAT YOU NEED")
    def respond(message, chat_history):
        # Query the index and append the (user, bot) pair to the history.
        chat_history.append((message, chatbot(message)))
        time.sleep(1)

        return "", chat_history

    # clear.click(lambda: None, None, gpt, queue=False)
    msg.submit(respond, [msg, gpt], [msg, gpt])
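
    # Sketch only (not wired up in the original): the commented-out microphone
    # input above could feed transcribe() so spoken questions land in the
    # textbox; `audio` is a hypothetical component name:
    #   audio = gr.Audio(source="microphone", type="filepath")
    #   audio.change(transcribe, inputs=audio, outputs=msg)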

# Rebuild the index from the 'docs' folder on every startup, then launch the UI.
index = construct_index("docs")
demo.launch()