artificialguybr committed on
Commit cb4c132
1 Parent(s): 2c4f599

Update app.py

Files changed (1)
  1. app.py +56 -49
app.py CHANGED
@@ -3,6 +3,7 @@ import requests
import json
import os

+# API and environment variables
API_KEY = os.getenv('API_KEY')
INVOKE_URL = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0e349b44-440a-44e1-93e9-abe8dcb27158"
FETCH_URL_FORMAT = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"
@@ -12,9 +13,19 @@ headers = {
    "Content-Type": "application/json",
}

+# Base system message
BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."

+
+def clear_chat():
+    """Clears the chat history and message state."""
+    print("Clearing chat...")
+    chat_history_state.value = []
+    chatbot.textbox.value = ""
+
+
def user(message, history, system_message=None):
+    """Updates the chat history with the user message."""
    print(f"User message: {message}")
    history = history or []
    if system_message:
@@ -22,7 +33,9 @@ def user(message, history, system_message=None):
    history.append({"role": "user", "content": message})
    return history

+
def call_nvidia_api(history, max_tokens, temperature, top_p):
+    """Calls the NVIDIA API to generate a response."""
    payload = {
        "messages": history,
        "temperature": temperature,
@@ -31,75 +44,69 @@ def call_nvidia_api(history, max_tokens, temperature, top_p):
        "stream": False
    }
    print(f"Payload enviado: {payload}")
-
    session = requests.Session()
    response = session.post(INVOKE_URL, headers=headers, json=payload)
-
    while response.status_code == 202:
        request_id = response.headers.get("NVCF-REQID")
        fetch_url = FETCH_URL_FORMAT + request_id
        response = session.get(fetch_url, headers=headers)
    response.raise_for_status()
-    response_body = response.json()
-    print(f"Payload recebido: {response_body}")
-
-    if response_body["choices"]:
-        assistant_message = response_body["choices"][0]["message"]["content"]
-        history.append({"role": "assistant", "content": assistant_message})
-    return history
+    response_body = response.json()
+    print(f"Payload recebido: {response_body}")
+    if response_body["choices"]:
+        assistant_message = response_body["choices"][0]["message"]["content"]
+        history.append({"role": "assistant", "content": assistant_message})
+    return history

-def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
-    print("Starting chat...")
-    updated_history = user(None, history, system_message)
-    updated_history = call_nvidia_api(updated_history, max_tokens, temperature, top_p)
-    return updated_history, ""

def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
+    """Updates the chatbot with the user message and generates a response."""
    print("Updating chatbot...")
-    chat_history = user(message, chat_history, system_message if not chat_history else None)
+    if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
+        chat_history = user(message, chat_history, system_message)
+    else:
+        chat_history = user(message, chat_history)
    chat_history = call_nvidia_api(chat_history, max_tokens, temperature, top_p)
    return chat_history

+
+# Gradio interface components
+system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
+max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
+temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
+top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
+
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("LLAMA 2 70B Free Demo")
            description = """
-            <div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
-                <strong>Explore the Capabilities of LLAMA 2 70B</strong>
-            </div>
-            <p>Llama 2 is a large language AI model capable of generating text and code in response to prompts.</p>
-            <p><strong>How to Use:</strong></p>
-            <ol>
-                <li>Enter your <strong>message</strong> in the textbox to start a conversation or ask a question.</li>
-                <li>Adjust the parameters in the "Additional Inputs" accordion to control the model's behavior.</li>
-                <li>Use the buttons below the chatbot to submit your query, clear the chat history, or perform other actions.</li>
-            </ol>
-            <p><strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
-            <p><strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
-            <p><strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
-            """
+
+            Explore the Capabilities of LLAMA 2 70B
+
+            Llama 2 is a large language AI model capable of generating text and code in response to prompts.
+            How to Use:
+
+            Enter your message in the textbox to start a conversation or ask a question.
+            Adjust the parameters in the "Additional Inputs" accordion to control the model's behavior.
+            Use the buttons below the chatbot to submit your query, clear the chat history, or perform other actions.
+
+            Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.
+            HF Created by: @artificialguybr (Twitter)
+            Discover more: artificialguy.com
+            """
            gr.Markdown(description)

-            chat_history_state = gr.State([])
-
-            system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
-            max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
-            temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
-            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
-
-            chatbot = gr.ChatInterface(
-                fn=lambda message, history: update_chatbot(message, history, system_msg.value, max_tokens.value, temperature.value, top_p.value),
-                additional_inputs=[system_msg, max_tokens, temperature, top_p],
-                title="LLAMA 2 70B Chatbot",
-                submit_btn="Submit",
-                clear_btn="🗑️ Clear",
-            )
-
-            def clear_chat():
-                chat_history_state.value = []
-                chatbot.textbox.value = ""
-
-            chatbot.clear()
+            chat_history_state = gr.State([])
+            chatbot = gr.ChatInterface(
+                fn=lambda message, history: update_chatbot(message, history, system_msg.value, max_tokens.value, temperature.value, top_p.value),
+                additional_inputs=[system_msg, max_tokens, temperature, top_p],
+                title="LLAMA 2 70B Chatbot",
+                submit_btn="Submit",
+                clear_btn="🗑️ Clear",
+            )
+
+            # Corrected clear button assignment:
+            chatbot.clear(outputs=[chat_history_state, chatbot.textbox])

demo.launch()
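
For reference, the invoke-and-poll flow that call_nvidia_api performs against the NVCF endpoints can be exercised on its own, outside Gradio. The sketch below is illustrative only and rests on assumptions: API_KEY is read from the environment as in app.py, the payload field names mirror call_nvidia_api's parameters, and the Authorization header line is assumed, since the headers definition sits outside this diff's context lines.

import os
import requests

# Endpoints copied from app.py
INVOKE_URL = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0e349b44-440a-44e1-93e9-abe8dcb27158"
FETCH_URL_FORMAT = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"

headers = {
    "Authorization": f"Bearer {os.getenv('API_KEY')}",  # assumed layout; not shown in the diff context
    "Content-Type": "application/json",
}

# Field names mirror call_nvidia_api's parameters; only "messages",
# "temperature" and "stream" are visible in the diff itself.
payload = {
    "messages": [{"role": "user", "content": "Hello!"}],
    "temperature": 0.2,
    "top_p": 0.7,
    "max_tokens": 256,
    "stream": False,
}

session = requests.Session()
response = session.post(INVOKE_URL, headers=headers, json=payload)

# NVCF answers 202 while the request is still running; poll the status
# endpoint with the id from the NVCF-REQID header until it completes.
while response.status_code == 202:
    request_id = response.headers.get("NVCF-REQID")
    response = session.get(FETCH_URL_FORMAT + request_id, headers=headers)

response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])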