artificialguybr committed on
Commit 9faed3d
1 Parent(s): 8cd9af7

Update app.py

Files changed (1)
  1. app.py +16 -29
app.py CHANGED
@@ -15,12 +15,9 @@ headers = {
 
 BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
 
-def call_nvidia_api(history_api, system_message, max_tokens, temperature, top_p):
+def call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}] if system_message else []
-    for msg in history_api:
-        if msg[1]:  # if there is an assistant reply for this turn
-            messages.append({"role": "user", "content": msg[0]})
-            messages.append({"role": "assistant", "content": msg[1]})
+    messages.extend([m for pair in history_api for m in ({"role": "user", "content": pair[0]}, {"role": "assistant", "content": pair[1]})])
 
     payload = {
         "messages": messages,
@@ -41,37 +38,27 @@ def call_nvidia_api(history_api, system_message, max_tokens, temperature, top_p)
 
     if response_body.get("choices"):
         assistant_message = response_body["choices"][0]["message"]["content"]
-        return assistant_message, response_body["choices"][0]
+        return assistant_message
     else:
-        return "Desculpe, ocorreu um erro ao gerar a resposta.", None
+        return "Desculpe, ocorreu um erro ao gerar a resposta."
 
-def chatbot_submit(message, chat_history_ui, chat_history_api, system_message, max_tokens_val, temperature_val, top_p_val):
-    assistant_message, api_response = call_nvidia_api(chat_history_api, system_message, max_tokens_val, temperature_val, top_p_val)
-
-    chat_history_ui.append([message, assistant_message])
-
-    if api_response:
-        chat_history_api.append([message, assistant_message])
-
-    return assistant_message, chat_history_ui, chat_history_api
+def chatbot_function(message, history_api, system_message, max_tokens, temperature, top_p):
+    assistant_message = call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p)
+    history_api.append([message, assistant_message])
+    return assistant_message, history_api
 
 system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
 max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
 temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
-# Gradio interface setup
+
 with gr.Blocks() as demo:
-    chat_history_state_ui = gr.State([])
-    chat_history_state_api = gr.State([])
-    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
-    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
-    temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
-    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
-    chatbot = gr.ChatInterface(
-        fn=chatbot_submit,
-        inputs=[gr.Textbox(label="Your Message"), chat_history_state_ui, chat_history_state_api, system_msg, max_tokens, temperature, top_p],
-        outputs=[gr.Text(label="Assistant Response"), chat_history_state_ui, chat_history_state_api],
-        title="Chatbot Interface"
+    chat_history_state = gr.State([])
+    chat_interface = gr.ChatInterface(
+        fn=chatbot_function,
+        chatbot=gr.Chatbot(history=chat_history_state),
+        additional_inputs=[system_msg, max_tokens, temperature, top_p],
+        title="LLAMA 70B Free Demo",
    )
 
-demo.launch()
+demo.launch()
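
For reference, a minimal runnable sketch of how the updated functions can be wired to gr.ChatInterface. It assumes gradio 4.x with the default pair-style chat history, lets ChatInterface manage the history itself (so the separate gr.State is not needed), and uses a placeholder invoke URL, API key, and requests call in place of the NVIDIA endpoint details defined elsewhere in app.py; none of those specifics are taken from this commit.

import requests
import gradio as gr

# Placeholders -- the real endpoint, model name, and auth header live earlier
# in app.py and are not shown in this diff.
INVOKE_URL = "https://api.example.com/v1/chat/completions"
headers = {"Authorization": "Bearer <NVIDIA_API_KEY>", "Accept": "application/json"}

BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."

def call_nvidia_api(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild an OpenAI-style message list: system prompt, prior turns, new user message.
    messages = [{"role": "system", "content": system_message}] if system_message else []
    for user_msg, assistant_msg in history:  # gradio 4.x pair-style history
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    payload = {
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
    }
    response = requests.post(INVOKE_URL, headers=headers, json=payload, timeout=60)
    response_body = response.json()
    if response_body.get("choices"):
        return response_body["choices"][0]["message"]["content"]
    return "Desculpe, ocorreu um erro ao gerar a resposta."

def chatbot_function(message, history, system_message, max_tokens, temperature, top_p):
    # gr.ChatInterface passes (message, history) plus additional_inputs and only
    # needs the assistant reply back; it updates the visible history itself.
    return call_nvidia_api(message, history, system_message, max_tokens, temperature, top_p)

with gr.Blocks() as demo:
    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
    temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
    gr.ChatInterface(
        fn=chatbot_function,
        additional_inputs=[system_msg, max_tokens, temperature, top_p],
        title="LLAMA 70B Free Demo",
    )

demo.launch()

Because ChatInterface appends each (message, reply) pair to its own history, the callback only needs to return the assistant text; appending the new user message to the payload before the request keeps the latest turn visible to the model.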