Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -2,16 +2,32 @@
|
|
2 |
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
|
3 |
from langchain import OpenAI
|
4 |
import gradio as gr
|
|
|
|
|
5 |
import sys
|
6 |
import os
|
|
|
|
|
|
|
7 |
|
8 |
os.environ["OPENAI_API_KEY"] = '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'
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
def construct_index(directory_path):
|
11 |
-
max_input_size =
|
12 |
-
num_outputs =
|
13 |
-
max_chunk_overlap =
|
14 |
-
chunk_size_limit =
|
15 |
|
16 |
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
|
17 |
|
@@ -26,22 +42,33 @@ def construct_index(directory_path):
|
|
26 |
|
27 |
|
28 |
def chatbot(input_text):
|
|
|
29 |
index = GPTSimpleVectorIndex.load_from_disk('index.json')
|
30 |
response = index.query(input_text)
|
31 |
-
return response.response
|
32 |
|
33 |
-
description = """
|
34 |
|
35 |
-
|
36 |
-
|
37 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
|
|
45 |
|
46 |
index = construct_index("docs")
|
47 |
-
|
|
|
|
2 |
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
|
3 |
from langchain import OpenAI
|
4 |
import gradio as gr
|
5 |
+
import random
|
6 |
+
import time
|
7 |
import sys
|
8 |
import os
|
9 |
+
from transformers import pipeline
|
10 |
+
# ASR pipeline used by transcribe(); constructed once at import time.
# NOTE(review): no model name given — presumably loads the library's default
# ASR model on first run; confirm this is the intended model for production.
p = pipeline("automatic-speech-recognition")
|
11 |
+
|
12 |
|
13 |
# SECURITY(review): a live OpenAI API key is committed in source and is now
# public — revoke/rotate this key immediately and supply it via a Spaces
# secret / environment variable instead of hard-coding it here.
os.environ["OPENAI_API_KEY"] = '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'
|
14 |
|
15 |
+
md = """This is some code:
|
16 |
+
|
17 |
+
hello
|
18 |
+
|
19 |
+
```py
|
20 |
+
def fn(x, y, z):
|
21 |
+
print(x, y, z)
|
22 |
+
"""
|
23 |
+
def transcribe(audio):
    """Run the module-level ASR pipeline on *audio* and return the transcript.

    `audio` is passed straight to the transformers pipeline `p`; the pipeline's
    result dict is expected to carry the transcript under the "text" key.
    """
    result = p(audio)
    return result["text"]
|
26 |
def construct_index(directory_path):
|
27 |
+
max_input_size = 10000
|
28 |
+
num_outputs = 10000
|
29 |
+
max_chunk_overlap = 20000
|
30 |
+
chunk_size_limit = 600000
|
31 |
|
32 |
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
|
33 |
|
|
|
42 |
|
43 |
|
44 |
def chatbot(input_text):
    """Answer *input_text* by querying the vector index persisted in index.json.

    Loads the index from disk on every call and returns the query response
    coerced to a plain string.
    """
    vector_index = GPTSimpleVectorIndex.load_from_disk('index.json')
    answer = vector_index.query(input_text)
    return str(answer.response)
|
49 |
|
|
|
50 |
|
51 |
+
with gr.Blocks() as demo:
    # Chat UI: history pane, free-text input, clear button, and a microphone
    # widget for users who prefer speaking over typing.
    gpt = gr.Chatbot(label="GPT SUPEr", elem_id="chatbot").style(height=800)
    msg = gr.Textbox(
        show_label=False,
        placeholder="Bem vindo ao ExpoSuper, Qual sua pergunta?",
    ).style(container=False)
    clear = gr.Button("Limpar Conversa")
    gr.Audio(
        source="microphone",
        type="filepath",
        label="ESTÁ COM DIFICULDADES EM ESCREVER? CLIQUE E ME DIGA O QUE DESEJA",
    )

    def respond(message, chat_history):
        """Append (message, bot reply) to the history; greet with the logo image."""
        chat_history.append((message, chatbot(message)))
        time.sleep(1)
        vetor = []
        real_path = os.path.dirname(os.path.realpath(__file__))

        # FIX: the original built the path with hard-coded Windows "\\"
        # separators, which produces a nonexistent path on the Linux hosts
        # Spaces deploy to. os.path.join is correct on every platform.
        if str(message).upper() in ("OLA", "OLÁ", "OI"):
            vetor.append(((os.path.join(real_path, "images", "logo.png"),), ""))

        # First return value clears the textbox; second refreshes the chat pane.
        return "", chat_history + vetor

    clear.click(lambda: None, None, gpt, queue=False)
    msg.submit(respond, [msg, gpt], [msg, gpt])

# Build the document index before serving, then start the Gradio app.
index = construct_index("docs")
demo.launch()
|
74 |
+
|