File size: 8,170 Bytes
1b58b25
 
 
c5ab8cc
1b58b25
 
 
 
 
 
 
ce886f3
1b58b25
f724401
 
1b58b25
 
8079b31
1b58b25
 
bcf36b3
 
 
 
 
 
9614bd6
 
 
ce886f3
1b58b25
 
 
0003aa2
 
ce886f3
0003aa2
b69f44a
 
 
 
0003aa2
ce886f3
0003aa2
ab8f8a8
 
 
487f2b2
 
 
 
1b58b25
 
 
 
0003aa2
1b58b25
0003aa2
 
 
 
 
 
 
 
 
 
 
 
 
1b58b25
 
 
 
 
a7a1d76
1b58b25
a7a1d76
1b58b25
 
 
 
 
a7a1d76
0003aa2
 
 
 
1b58b25
 
 
0003aa2
1b58b25
 
 
 
 
 
0003aa2
 
5609551
0003aa2
0141705
0003aa2
 
 
a7a1d76
 
 
 
 
 
 
67afa07
 
ce886f3
 
a7a1d76
 
 
 
761c492
1b58b25
ce886f3
d6d075e
 
 
ce886f3
 
d6d075e
be482bd
 
 
 
d6d075e
 
 
3ebe6cb
d6d075e
 
 
 
 
 
 
 
 
 
 
 
 
 
c5ab8cc
ab6a1bd
 
 
ce886f3
c5ab8cc
ce886f3
 
 
 
 
ab6a1bd
ce886f3
 
 
 
 
 
329843d
ab6a1bd
5b836f5
1b58b25
ce886f3
 
d35c47f
ce886f3
 
 
 
 
 
 
584785c
1b58b25
d35c47f
1b58b25
 
 
 
63a8cd0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ce886f3
 
 
 
 
 
1b58b25
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
# coding=utf8
from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
import openai
import gradio as gr
import random
import time
import sys
import os
from transformers import pipeline

# ASR (automatic-speech-recognition) pipeline is loaded once at import time
# and reused by transcribe() below.
p = pipeline("automatic-speech-recognition")

# SECURITY: a real OpenAI API key was previously hard-coded here (and is still
# visible in version-control history) -- it must be rotated/revoked.  The
# environment variable now takes precedence; the literal below is only a
# fallback preserving the old behaviour (note it is malformed anyway: it has a
# "k-" prefix instead of "sk-", so it could never have authenticated).
os.environ.setdefault("OPENAI_API_KEY", 'k-XxiwN5pp1VibrIo3Ntw5T3BlbkFJWFa8B5c9BIPzDwGwEUYB')

# Custom CSS injected into the Gradio Blocks app (theming, fixed input bar,
# chat bubble colours).  BUG FIX: ".wrap.svelte-6roggh" previously declared
# "adding: var(--block-padding);" -- an invalid property name silently ignored
# by browsers; corrected to "padding:".
css = """

#component-2 {position: absolute; bottom: 0;    width: 100%;
}
.app.svelte-ac4rv4>.main.svelte-ac4rv4 {
    display: flex;
    flex-grow: 1;
    flex-direction: column;
    background-image: url(https://i.ibb.co/xj8R4r3/background-vertical.png);
}
div.svelte-1frtwj3 {
    display: inline-flex;
    align-items: center;}

div.float.svelte-1frtwj3 {
    position: absolute;
    opacity: 0;
    top: var(--block-label-margin);
    left: var(--block-label-margin);}

.wrap.svelte-6roggh.svelte-6roggh {
 padding: var(--block-padding);
    height: 100%;
    max-height: 800px;
    overflow-y: auto;
    }

.bot.svelte-6roggh.svelte-6roggh, .pending.svelte-6roggh.svelte-6roggh {
    border-color: var(--border-color-accent);
    background-color: var(--color-accent-soft);
    color: white;
    font-family: initial;
    font-style: italic;
    font: message-box;
    font-weight: bold;
}
div.svelte-1frtwj3 {
    display: inline-flex;
    align-items: center;
    z-index: var(--layer-2);
    box-shadow: var(--block-shadow);
    border: var(--block-label-border-width) solid #ffffff;
    border-top: none;
    border-left: none;
    border-radius: var(--block-label-radius);
    background: #eff6ff;
    padding: var(--block-label-padding);
    pointer-events: none;
    color: var(--block-label-text-color);
    font-weight: var(--block-label-text-weight);
    width: 100%;
    line-height: var(--line-sm);
    }
div.svelte-awbtu4 {
    display: flex;
    flex-direction: inherit;
    flex-wrap: wrap;
    gap: var(--form-gap-width);
    box-shadow: var(--block-shadow);
    border: var(--block-border-width) solid #5f0000;
    border-radius: var(--radius-lg);
    background: #ffffff;
    overflow: hidden;
    position: fixed;
    bottom: 0;
    margin-left: -16px;
}

img.svelte-ms5bsk {
    width: var(--size-full);
    height: 90px;
    object-fit: contain;
}
.app.svelte-ac4rv4.svelte-ac4rv4 {
    max-width: none;
    background-color: #ffffff;
}
.app.svelte-ac4rv4.svelte-ac4rv4{max-width:none}
.wrap.svelte-1o68geq.svelte-1o68geq {max-height: none}
.block.svelte-mppz8v {
    position: relative;
    margin: 0;
    box-shadow: var(--block-shadow);
    border-width: var(--block-border-width);
    border-color: white;
    border-radius: var(--block-radius);
    background: white;
    width: 100%;
    line-height: var(--line-sm);
}
div.bot.svelte-6roggh.svelte-6roggh {
    background: #D9A13D;
}
div.bot.svelte-17nzccn.svelte-17nzccn {
    background: #D9A13D;
}
div.user.svelte-6roggh.svelte-6roggh {
        background: #5F0000;
color: white;    


}
div.user.svelte-17nzccn.svelte-17nzccn {
    background: #5F0000;
}    
"""


def transcribe(audio):
    """Transcribe an audio file to text via the module-level ASR pipeline."""
    result = p(audio)
    return result["text"]


def construct_index(directory_path):
    """Build a GPT vector index from every document under *directory_path*.

    The index is persisted to 'index.json' (read back by chatbot()) and also
    returned to the caller.
    """
    # Prompt-sizing parameters.  BUG FIX: the previous values (1e8 .. 6e9)
    # exceeded any model's context window by orders of magnitude; these match
    # davinci-class limits (4096-token context).
    max_input_size = 4096
    num_outputs = 512
    max_chunk_overlap = 20
    chunk_size_limit = 600

    prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)

    # BUG FIX: "text-davinci-004" does not exist; "text-davinci-003" is the
    # davinci-class completion model this code targets.
    llm_predictor = LLMPredictor(llm=OpenAI(temperature=0.0, model_name="text-davinci-003", max_tokens=num_outputs))

    documents = SimpleDirectoryReader(directory_path).load_data()

    # NOTE(review): prompt_helper and llm_predictor are constructed but never
    # passed to the index, so from_documents() runs on library defaults.  Wire
    # them in via a ServiceContext if custom prompt sizing is actually needed.
    index = GPTSimpleVectorIndex.from_documents(documents)
    index.save_to_disk('index.json')

    return index


def chatbot(input_text):
    """Answer *input_text* by querying the vector index persisted on disk."""
    stored_index = GPTSimpleVectorIndex.load_from_disk('index.json')
    result = stored_index.query(input_text)
    return str(result.response)

def chat(input_text, MaxToken=50, outputs=3):
    """Send *input_text* to the ChatGPT Chat Completion endpoint.

    MaxToken caps the tokens generated per completion (at most 4096 for
    "gpt-3.5-turbo") and *outputs* is how many completion variants the API
    produces; only the first variant's text is returned.
    """
    messages = [{"role": "user", "content": input_text}]
    # The Chat Completion endpoint takes a list of role-tagged messages.
    # Other models usable here: gpt-4, gpt-4-0314, gpt-4-32k, gpt-4-32k-0314,
    # gpt-3.5-turbo, gpt-3.5-turbo-0301.
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=MaxToken,
        n=outputs,
    )
    return str(response.choices[0].message.content)
    
# --- Gradio UI wiring (runs at import time) ---
with gr.Blocks(css=css) as demo:
    realPath = str(os.path.dirname(os.path.realpath(__file__)))
    # Header banner image; elem ids/classes are targeted by the css string above.
    img1 = gr.Image("images/header.png", elem_classes=".img.svelte-ms5bsk", elem_id="img.svelte-ms5bsk").style(
        container=False)
    # Chat transcript panel.
    gpt = gr.Chatbot(label=".", elem_classes=".wrap.svelte-1o68geq.svelte-1o68geq", elem_id="chatbot").style(
        container=True)
    # User input box (placeholder text is Portuguese: "Welcome to the OM30
    # virtual assistant, how can I help you?").
    msg = gr.Textbox(elem_id="div.svelte-awbtu4", elem_classes="textBoxBot", show_label=False,
                     placeholder="Bem vindo ao assistente virtual OM30, em que posso ajuda-lo?",
                     ).style(container=False)


    # clear = gr.Button("Limpar Conversa")
    # Microphone capture widget; note it is not wired to transcribe() here --
    # presumably intended as voice input. TODO confirm intended hookup.
    gr.Audio(source="microphone", type="filepath",label="ESTÁ COM DIFICULDADES EM ESCREVER? CLIQUE E ME DIGA O QUE DESEJA")
    def respond(message, chat_history):
        # Submit handler: appends (user message, ChatGPT reply) to the history
        # and clears the textbox (first element of the returned tuple).
        chat_history.append((message, chat(message)))
        time.sleep(1)
        vetor = []  # extra (media, "") pairs to append; currently always empty
        realPath = str(os.path.dirname(os.path.realpath(__file__)))

        # Disabled keyword->media dispatch table (wine-catalog demo assets),
        # kept for reference.
        #if str(message).upper() == "OLA" or str(message).upper() == "OLÁ" or str(message).upper() == "OI":
        #    vetor = vetor + [((realPath + "\\images\\hippo-apresentacao.mp4",), "")]
        #elif str(message).upper() == "VINHO CASA DEL RONCO PINOT GRIGIO":
        #    vetor = vetor + [((realPath + "\\images\\casa-del-ronco-branco.png",), "")]
        #elif str(message).upper() == "SURVIVOR CHENIN BLANC":
        #    vetor = vetor + [((realPath + "\\images\\survivor-branco.png",), "")]
        #    vetor = vetor + [((realPath + "\\images\\survivor.mp4",), "")]

        #elif str(message).upper() == "VINHO PORTO NOVA VERDE":
        #    vetor = vetor + [((realPath + "\\images\\porta-nova-branco.jpg",), "")]
        #    vetor = vetor + [((realPath + "\\images\\porta-nova-verde.mp4",), "")]

        #elif str(message).upper() == "VINHO QUINTA DO PINTO ARINTO BRANCO":
        #    vetor = vetor + [((realPath + "\\images\\quinta-pinto-arinto-branco.png",), "")]
        #elif str(message).upper() == "VINHO 1492 CHARDONNAY":
        #    vetor = vetor + [((realPath + "\\images\\chardonay-branco.jpg",), "")]
        #elif str(message).upper() == "ME SUGIRA UM VINHO TINTO BOM COM QUEIJO":
        #    vetor = vetor + [((realPath + "\\images\\TNT-CABERNET.png",), "")]
        #    vetor = vetor + [((realPath + "\\images\\vinho-queijo.mp4",), "")]

        #elif str(message).upper() == "VINHO BOM COM CHOCOLATE":
        #    vetor = vetor + [((realPath + "\\images\\TNT-CABERNET.png",), "")]
        #elif str(message).upper() == "VINHO BOM COM PEIXE":
        #    vetor = vetor + [((realPath + "\\images\\luson-branco.png",), "")]
        #    vetor = vetor + [((realPath + "\\images\\vinho-peixe.mp4",), "")]

        #elif str(message).upper() == "VINHAS DO LASSO COLHEITA SELECIONADA":
        #    vetor = vetor + [((realPath + "\\images\\lasso-colheita-rose.png",), "")]
        #elif str(message).upper() == "DOM CAMPOS MOSCATEL":
        #    vetor = vetor + [((realPath + "\\images\\dom-campos-rose.png",), "")]
        #elif str(message).upper() == "BECAS ROSE MEIO SECO":
        #    vetor = vetor + [((realPath + "\\images\\becas-rose.png",), "")]
        #elif str(message).upper() == "PORTA DA RAVESSA":
        #    vetor = vetor + [((realPath + "\\images\\luson-branco.png",), "")]

        return "", chat_history + vetor


    # clear.click(lambda:None, None, gpt, queue=False,)
    # Pressing Enter in the textbox routes (msg, gpt) through respond().
    msg.submit(respond, [msg, gpt], [msg, gpt])

# Build the document index from ./docs at startup, then serve the app.
index = construct_index("docs")
demo.launch()