artificialguybr committed
Commit b6ff2ee
1 Parent(s): a414401

Update app.py

Files changed (1)
  1. app.py +3 -8
app.py CHANGED
@@ -16,12 +16,6 @@ headers = {
 # Base system message
 BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
 
-def clear_chat():
-    """Clears the chat history and message state."""
-    print("Clearing chat...")
-    chat_history_state.value = []
-    chatbot.textbox.value = ""
-
 def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
     """Calls the NVIDIA API to generate a response."""
     messages = [{"role": "system", "content": system_message}]
@@ -34,7 +28,7 @@ def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
         "max_tokens": max_tokens,
         "stream": False
     }
-    print(f"Payload enviado: {payload}")
+    print(f"Payload enviado: {json.dumps(payload, indent=2)}")
     session = requests.Session()
     response = session.post(INVOKE_URL, headers=headers, json=payload)
     while response.status_code == 202:
@@ -43,7 +37,7 @@ def call_nvidia_api(history, system_message, max_tokens, temperature, top_p):
         response = session.get(fetch_url, headers=headers)
     response.raise_for_status()
     response_body = response.json()
-    print(f"Payload recebido: {response_body}")
+    print(f"Payload recebido: {json.dumps(response_body, indent=2)}")
     if response_body.get("choices"):
         assistant_message = response_body["choices"][0]["message"]["content"]
         return assistant_message
@@ -70,6 +64,7 @@ system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder
 max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
 temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
 top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
+
 with gr.Blocks() as demo:
     chat_history_state = gr.State([])
     chatbot = gr.ChatInterface(
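
The removed clear_chat assigned directly to component .value attributes; in Gradio, reassigning .value after the interface is built does not update a running app, so clearing chat state is normally wired as an event callback that returns fresh values. A minimal sketch of that pattern, not the app's code: the button and the wiring below are hypothetical, only the component names mirror the diff.

import gradio as gr

with gr.Blocks() as demo:
    chat_history_state = gr.State([])
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type a message...")
    clear_btn = gr.Button("Clear chat")

    def clear_chat():
        # Return new values for the outputs; Gradio applies them to the
        # components, instead of the callback mutating .value in place.
        return [], [], ""

    clear_btn.click(clear_chat, inputs=None, outputs=[chat_history_state, chatbot, msg])

demo.launch()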
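
The new debug prints rely on json.dumps, which presumes json is imported elsewhere in app.py. A standalone sketch of the same logging pattern, with a made-up payload shaped like the one built in call_nvidia_api:

import json

# Hypothetical payload for illustration only.
payload = {
    "messages": [{"role": "system", "content": "I carefully provide accurate answers."}],
    "temperature": 0.2,
    "top_p": 0.7,
    "max_tokens": 1024,
    "stream": False,
}

# json.dumps(..., indent=2) pretty-prints the nested dict across several
# lines, which reads better in logs than the single-line repr produced by
# the old f"{payload}" interpolation.
print(f"Payload enviado: {json.dumps(payload, indent=2)}")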
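
For context, call_nvidia_api follows an invoke-then-poll pattern: POST the payload, and while the service answers 202, GET a fetch URL until the result is ready. The sketch below is an outline under assumptions, not the app's code: the fetch-URL construction falls outside the hunks shown, so INVOKE_URL, FETCH_URL_FORMAT, the NVCF-REQID header name, and the credentials are placeholders.

import requests

# Placeholder endpoints and credentials; the real values live in app.py
# outside the hunks shown in this diff.
INVOKE_URL = "https://example.invalid/invoke"
FETCH_URL_FORMAT = "https://example.invalid/status/"
headers = {"Authorization": "Bearer API_KEY", "Accept": "application/json"}

def call_api(payload):
    session = requests.Session()
    response = session.post(INVOKE_URL, headers=headers, json=payload)
    # 202 means accepted but still running: keep polling a fetch URL
    # (assumed here to be FETCH_URL_FORMAT plus a request id) until done.
    while response.status_code == 202:
        request_id = response.headers.get("NVCF-REQID")  # assumed header name
        response = session.get(FETCH_URL_FORMAT + request_id, headers=headers)
    response.raise_for_status()
    body = response.json()
    if body.get("choices"):
        return body["choices"][0]["message"]["content"]
    return ""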