artificialguybr committed on
Commit
fc26c64
1 Parent(s): 32fc9d9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -18
app.py CHANGED
@@ -16,17 +16,10 @@ headers = {
16
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
17
 
18
  def call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p):
19
- messages = []
20
- if system_message: # Adiciona a mensagem do sistema, se houver
21
- messages.append({"role": "system", "content": system_message})
22
- # Adiciona as mensagens do histórico
23
  for msg in history_api:
24
- messages.append({"role": "user", "content": msg[0]})
25
- if msg[1]: # Garante que não adicionamos respostas vazias
26
- messages.append({"role": "assistant", "content": msg[1]})
27
-
28
- # Adiciona a mensagem atual do usuário
29
- messages.append({"role": "user", "content": message})
30
 
31
  payload = {
32
  "messages": messages,
@@ -51,24 +44,25 @@ def call_nvidia_api(message, history_api, system_message, max_tokens, temperatur
51
  else:
52
  return "Desculpe, ocorreu um erro ao gerar a resposta."
53
 
54
-
55
  def chatbot_function(message, history_api, system_message, max_tokens, temperature, top_p):
56
  assistant_message = call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p)
57
  history_api.append([message, assistant_message])
58
  return assistant_message, history_api
59
 
60
- system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
61
- max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
62
- temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
63
- top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
64
 
65
  with gr.Blocks() as demo:
66
  chat_history_state = gr.State([])
67
  chat_interface = gr.ChatInterface(
68
  fn=chatbot_function,
69
- chatbot=gr.Chatbot(value=chat_history_state),
70
- additional_inputs=[system_msg, max_tokens, temperature, top_p],
71
  title="LLAMA 70B Free Demo",
 
 
72
  )
73
 
74
- demo.launch()
 
16
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
17
 
18
  def call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p):
19
+ messages = [{"role": "system", "content": system_message}] if system_message else []
20
+ messages.extend([{"role": "user", "content": message}])
 
 
21
  for msg in history_api:
22
+ messages.extend([{"role": "user", "content": msg[0]}, {"role": "assistant", "content": msg[1]}])
 
 
 
 
 
23
 
24
  payload = {
25
  "messages": messages,
 
44
  else:
45
  return "Desculpe, ocorreu um erro ao gerar a resposta."
46
 
 
47
def chatbot_function(message, history_api, system_message, max_tokens, temperature, top_p):
    """Gradio chat callback: query the NVIDIA API and record the exchange.

    Delegates to ``call_nvidia_api`` with the current message, the running
    history, and the generation settings, then appends the new
    [user, assistant] pair to ``history_api`` so the next turn sees it.

    Returns the assistant's reply together with the updated history.
    """
    reply = call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p)
    history_api.append([message, reply])
    return reply, history_api
51
 
52
+ system_msg = gr.Textbox(value=BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
53
+ max_tokens = gr.Slider(minimum=20, maximum=1024, label="Max Tokens", step=20, value=1024)
54
+ temperature = gr.Slider(minimum=0.0, maximum=1.0, label="Temperature", step=0.1, value=0.2)
55
+ top_p = gr.Slider(minimum=0.0, maximum=1.0, label="Top P", step=0.05, value=0.7)
56
 
57
  with gr.Blocks() as demo:
58
  chat_history_state = gr.State([])
59
  chat_interface = gr.ChatInterface(
60
  fn=chatbot_function,
61
+ inputs=["message", "history_api", system_msg, max_tokens, temperature, top_p],
62
+ outputs=["assistant_message", "history_api"],
63
  title="LLAMA 70B Free Demo",
64
+ description="Explore the capabilities of LLAMA 2 70B",
65
+ additional_inputs=[system_msg, max_tokens, temperature, top_p]
66
  )
67
 
68
+ demo.launch()