artificialguybr committed on
Commit
020a962
1 Parent(s): 1dec7dc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -16
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  import requests
3
- import os
4
  import json
 
5
 
6
  # API and environment variables
7
  API_KEY = os.getenv('API_KEY')
@@ -16,18 +16,22 @@ headers = {
16
  # Base system message
17
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
18
 
19
- def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
20
- """Calls the NVIDIA API to generate a response."""
21
- messages = [{"role": "system", "content": system_message}]
22
- messages.extend([{"role": "user", "content": msg} for msg, _ in history])
 
23
 
 
 
24
  payload = {
25
- "messages": messages,
26
  "temperature": temperature,
27
  "top_p": top_p,
28
  "max_tokens": max_tokens,
29
  "stream": False
30
  }
 
31
  session = requests.Session()
32
  response = session.post(INVOKE_URL, headers=headers, json=payload)
33
  while response.status_code == 202:
@@ -36,31 +40,37 @@ def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
36
  response = session.get(fetch_url, headers=headers)
37
  response.raise_for_status()
38
  response_body = response.json()
 
39
  if response_body.get("choices"):
40
  assistant_message = response_body["choices"][0]["message"]["content"]
41
  return assistant_message
42
  else:
43
- return "Sorry, there was an error generating the response."
44
 
45
- def chatbot_submit(message, chat_history, system_message, max_tokens_val, temperature_val, top_p_val):
46
- """Submits the user message to the chatbot and updates the chat history."""
47
- chat_history.append([message, ""]) # Add user message to history
 
48
 
49
- # Call NVIDIA API to generate a response
50
- assistant_message = call_nvidia_api(chat_history, system_message, max_tokens_val, temperature_val, top_p_val)
51
 
52
- # Update history with assistant's response
 
 
 
53
  chat_history[-1][1] = assistant_message
54
 
55
- return assistant_message, chat_history
 
 
 
56
 
57
- # Gradio interface setup
58
  chat_history_state = gr.State([])
59
  system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
60
  max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
61
  temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
62
  top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
63
-
64
  with gr.Blocks() as demo:
65
  chat_history_state = gr.State([])
66
  chatbot = gr.ChatInterface(
 
1
  import gradio as gr
2
  import requests
 
3
  import json
4
+ import os
5
 
6
  # API and environment variables
7
  API_KEY = os.getenv('API_KEY')
 
16
  # Base system message
17
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
18
 
19
def clear_chat():
    """Reset the chat UI to an empty state.

    NOTE(review): this mutates the module-level `chat_history_state` and
    `chatbot` components directly. Gradio `gr.State` values are held
    per-session, so assigning to `.value` here may not clear an active
    session's history — confirm against the event wiring that calls this
    (typically a clear button should instead *return* empty values as
    outputs).
    """
    print("Clearing chat...")
    # Empty the shared history state and the input textbox in place.
    chat_history_state.value = []
    chatbot.textbox.value = ""
24
 
25
def call_nvidia_api(api_history, max_tokens, temperature, top_p):
    """Call the NVIDIA inference API and return the assistant's reply text.

    Args:
        api_history: OpenAI-style message list — dicts with "role" and
            "content" keys — sent as the payload's "messages" field.
        max_tokens: Cap on generated tokens, forwarded verbatim.
        temperature: Sampling temperature, forwarded verbatim.
        top_p: Nucleus-sampling cutoff, forwarded verbatim.

    Returns:
        The content string of the first choice on success, or a fixed
        Portuguese error message when the response carries no "choices".

    Relies on module-level `INVOKE_URL` and `headers` (built from the
    API_KEY environment variable elsewhere in this file).
    """
    payload = {
        "messages": api_history,
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
        "stream": False
    }
    print(f"Payload enviado: {payload}")
    session = requests.Session()
    response = session.post(INVOKE_URL, headers=headers, json=payload)
    # HTTP 202 means the request was accepted but the result is not ready;
    # poll until a terminal status arrives.
    # NOTE(review): the diff this was reconstructed from folds two lines
    # here — the original file derives `fetch_url` (from the request id)
    # between these statements; `fetch_url` is not defined in the visible
    # snippet. Confirm against the full file.
    while response.status_code == 202:
        response = session.get(fetch_url, headers=headers)
    response.raise_for_status()
    response_body = response.json()
    print(f"Payload recebido: {response_body}")
    if response_body.get("choices"):
        assistant_message = response_body["choices"][0]["message"]["content"]
        return assistant_message
    else:
        return "Desculpe, ocorreu um erro ao gerar a resposta."
49
 
50
def chatbot_submit(message, chat_history, api_history, system_message, max_tokens_val, temperature_val, top_p_val):
    """Submit the user message and update both chat histories.

    Args:
        message: Raw user input text.
        chat_history: Gradio-style history — list of [user, assistant] pairs.
        api_history: OpenAI-style history — list of {"role", "content"} dicts,
            sent to the NVIDIA API.
        system_message: System prompt used to steer the model.
        max_tokens_val: Max-tokens setting forwarded to `call_nvidia_api`.
        temperature_val: Temperature setting forwarded to `call_nvidia_api`.
        top_p_val: Top-p setting forwarded to `call_nvidia_api`.

    Returns:
        Tuple of (assistant_message, chat_history, api_history) so Gradio
        can refresh the chatbot output and both state components.
    """
    # FIX: `system_message` was accepted but never used, so the system
    # prompt was silently dropped from every API call. Ensure the API
    # history carries it as the leading "system" message (inserted once).
    if not api_history or api_history[0].get("role") != "system":
        api_history.insert(0, {"role": "system", "content": system_message})

    # Show the user turn immediately; the assistant slot is filled in below.
    chat_history.append([message, ""])
    api_history.append({"role": "user", "content": message})

    # Generate the reply from the full API-side conversation.
    assistant_message = call_nvidia_api(api_history, max_tokens_val, temperature_val, top_p_val)

    # Record the assistant reply in both representations.
    chat_history[-1][1] = assistant_message
    api_history.append({"role": "assistant", "content": assistant_message})

    return assistant_message, chat_history, api_history
68
 
 
69
# Gradio UI controls (created at module level, outside any Blocks context).
# NOTE(review): `chat_history_state` is re-created inside the
# `with gr.Blocks()` block further down, shadowing this instance — confirm
# which one the event wiring actually uses; this one may be dead.
chat_history_state = gr.State([])
system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
 
74
  with gr.Blocks() as demo:
75
  chat_history_state = gr.State([])
76
  chatbot = gr.ChatInterface(