artificialguybr committed on
Commit 3be9b12
1 Parent(s): 75840d0

Update app.py

Files changed (1)
  1. app.py +48 -55
app.py CHANGED
@@ -3,7 +3,6 @@ import requests
  import json
  import os
 
- # API and environment variables
  API_KEY = os.getenv('API_KEY')
  INVOKE_URL = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0e349b44-440a-44e1-93e9-abe8dcb27158"
  FETCH_URL_FORMAT = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"
@@ -13,20 +12,9 @@ headers = {
      "Content-Type": "application/json",
  }
 
- # Base system message
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
 
-
- def clear_chat(chat_history_state, chat_message):
-     """Clears the chat history and message state."""
-     print("Clearing chat...")
-     chat_history_state = []
-     chat_message = ''
-     return chat_history_state, chat_message
-
-
  def user(message, history, system_message=None):
-     """Updates the chat history with the user message."""
      print(f"User message: {message}")
      history = history or []
      if system_message:
@@ -34,9 +22,7 @@ def user(message, history, system_message=None):
      history.append({"role": "user", "content": message})
      return history
 
-
  def call_nvidia_api(history, max_tokens, temperature, top_p):
-     """Calls the NVIDIA API to generate a response."""
      payload = {
          "messages": history,
          "temperature": temperature,
@@ -45,68 +31,75 @@ def call_nvidia_api(history, max_tokens, temperature, top_p):
          "stream": False
      }
      print(f"Payload enviado: {payload}")
+
      session = requests.Session()
      response = session.post(INVOKE_URL, headers=headers, json=payload)
+
      while response.status_code == 202:
          request_id = response.headers.get("NVCF-REQID")
          fetch_url = FETCH_URL_FORMAT + request_id
          response = session.get(fetch_url, headers=headers)
      response.raise_for_status()
-     response_body = response.json()
-     print(f"Payload recebido: {response_body}")
-     if response_body["choices"]:
-         assistant_message = response_body["choices"][0]["message"]["content"]
-         history.append({"role": "assistant", "content": assistant_message})
-     return history
+     response_body = response.json()
+     print(f"Payload recebido: {response_body}")
 
+     if response_body["choices"]:
+         assistant_message = response_body["choices"][0]["message"]["content"]
+         history.append({"role": "assistant", "content": assistant_message})
+     return history
+
+ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
+     print("Starting chat...")
+     updated_history = user(None, history, system_message)
+     updated_history = call_nvidia_api(updated_history, max_tokens, temperature, top_p)
+     return updated_history, ""
 
  def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
-     """Updates the chatbot with the user message and generates a response."""
      print("Updating chatbot...")
-     if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
-         chat_history = user(message, chat_history, system_message)
-     else:
-         chat_history = user(message, chat_history)
+     chat_history = user(message, chat_history, system_message if not chat_history else None)
      chat_history = call_nvidia_api(chat_history, max_tokens, temperature, top_p)
      return chat_history
 
-
- # Gradio interface components
- system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
- max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
- temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
- top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
-
  with gr.Blocks() as demo:
      with gr.Row():
          with gr.Column():
              gr.Markdown("LLAMA 2 70B Free Demo")
              description = """
-             <div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
-                 <strong>Explore the Capabilities of LLAMA 2 70B</strong>
-             </div>
-             <p>Llama 2 is a large language AI model capable of generating text and code in response to prompts. </p>
-             <p> <strong>How to Use:</strong></p>
-             <ol>
-                 <li>Enter your <strong>message</strong> in the textbox to start a conversation or ask a question.</li>
-                 <li>Adjust the parameters in the "Additional Inputs" accordion to control the model's behavior.</li>
-                 <li>Use the buttons below the chatbot to submit your query, clear the chat history, or perform other actions.</li>
-             </ol>
-             <p> <strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
-             <p> <strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
-             <p> <strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
-             """
+             <div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
+                 <strong>Explore the Capabilities of LLAMA 2 70B</strong>
+             </div>
+             <p>Llama 2 is a large language AI model capable of generating text and code in response to prompts.</p>
+             <p><strong>How to Use:</strong></p>
+             <ol>
+                 <li>Enter your <strong>message</strong> in the textbox to start a conversation or ask a question.</li>
+                 <li>Adjust the parameters in the "Additional Inputs" accordion to control the model's behavior.</li>
+                 <li>Use the buttons below the chatbot to submit your query, clear the chat history, or perform other actions.</li>
+             </ol>
+             <p><strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
+             <p><strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
+             <p><strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
+             """
              gr.Markdown(description)
 
-     chat_history_state = gr.State([])
-     chatbot = gr.ChatInterface(
-         fn=lambda message, history: update_chatbot(message, history, system_msg.value, max_tokens.value, temperature.value, top_p.value),
-         additional_inputs=[system_msg, max_tokens, temperature, top_p],
-         title="LLAMA 2 70B Chatbot",
-         submit_btn="Submit",
-         clear_btn="🗑️ Clear",
-     )
+     chat_history_state = gr.State([])
+
+     system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
+     max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
+     temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
+     top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
+
+     chatbot = gr.ChatInterface(
+         fn=lambda message, history: update_chatbot(message, history, system_msg.value, max_tokens.value, temperature.value, top_p.value),
+         additional_inputs=[system_msg, max_tokens, temperature, top_p],
+         title="LLAMA 2 70B Chatbot",
+         submit_btn="Submit",
+         clear_btn="🗑️ Clear",
+     )
+
+     def clear_chat():
+         chat_history_state.value = []
+         chatbot.textbox.value = ""
 
-     chatbot.clear(fn=clear_chat, outputs=[chat_history_state, chatbot.textbox])
+     chatbot.clear(outputs=[chat_history_state, chatbot.textbox])
 
  demo.launch()
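
For reference, the invoke-then-poll cycle that the reworked call_nvidia_api relies on can be exercised on its own. The sketch below is a minimal standalone version of that flow; the Authorization header is an assumption (the headers dict is only partially visible in the hunks above), and the prompt and sampling values are placeholders.

import os
import requests

# Minimal sketch of the NVCF invoke-then-poll flow used by call_nvidia_api above.
API_KEY = os.getenv("API_KEY")
INVOKE_URL = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0e349b44-440a-44e1-93e9-abe8dcb27158"
FETCH_URL_FORMAT = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"
headers = {
    "Authorization": f"Bearer {API_KEY}",  # assumption: standard NVCF auth header, not shown in the hunks
    "Content-Type": "application/json",
}

payload = {
    "messages": [{"role": "user", "content": "Hello!"}],  # placeholder prompt
    "temperature": 0.2,
    "top_p": 0.7,
    "max_tokens": 1024,
    "stream": False,
}

session = requests.Session()
response = session.post(INVOKE_URL, headers=headers, json=payload)

# NVCF answers 202 while the job is still running; poll the status endpoint
# using the request id returned in the NVCF-REQID header until it finishes.
while response.status_code == 202:
    request_id = response.headers.get("NVCF-REQID")
    response = session.get(FETCH_URL_FORMAT + request_id, headers=headers)

response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])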
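
A wiring note on the new gr.ChatInterface block: Gradio passes the current values of additional_inputs to fn as extra positional arguments after message and history, so a handler can accept them directly instead of reading .value inside a lambda. The sketch below is a hypothetical alternative that reuses the names from this commit; it is not what the commit ships, and it returns only the reply text, which is the shape ChatInterface expects.

# Hypothetical alternative handler: additional_inputs arrive as extra arguments.
def respond(message, history, system_message, max_tokens, temperature, top_p):
    # ChatInterface history is a list of [user, assistant] pairs; rebuild the
    # role/content list that call_nvidia_api expects.
    messages = [{"role": "system", "content": system_message}] if system_message else []
    for user_msg, assistant_msg in history or []:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    messages = call_nvidia_api(messages, max_tokens, temperature, top_p)
    return messages[-1]["content"]  # last entry is the assistant reply appended above

chatbot = gr.ChatInterface(
    fn=respond,
    additional_inputs=[system_msg, max_tokens, temperature, top_p],
    title="LLAMA 2 70B Chatbot",
)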