artificialguybr commited on
Commit
8acbe0b
1 Parent(s): fc26c64

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -26
app.py CHANGED
@@ -15,54 +15,117 @@ headers = {
15
 
16
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
17
 
18
- def call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p):
19
- messages = [{"role": "system", "content": system_message}] if system_message else []
20
- messages.extend([{"role": "user", "content": message}])
21
- for msg in history_api:
22
- messages.extend([{"role": "user", "content": msg[0]}, {"role": "assistant", "content": msg[1]}])
23
 
 
 
 
 
 
 
 
 
 
24
  payload = {
25
- "messages": messages,
26
  "temperature": temperature,
27
  "top_p": top_p,
28
  "max_tokens": max_tokens,
29
  "stream": False
30
  }
31
 
 
 
32
  session = requests.Session()
33
  response = session.post(INVOKE_URL, headers=headers, json=payload)
 
34
  while response.status_code == 202:
35
  request_id = response.headers.get("NVCF-REQID")
36
  fetch_url = FETCH_URL_FORMAT + request_id
37
  response = session.get(fetch_url, headers=headers)
 
38
  response.raise_for_status()
39
  response_body = response.json()
40
 
41
- if response_body.get("choices"):
42
- assistant_message = response_body["choices"][0]["message"]["content"]
43
- return assistant_message
44
- else:
45
- return "Desculpe, ocorreu um erro ao gerar a resposta."
46
 
47
- def chatbot_function(message, history_api, system_message, max_tokens, temperature, top_p):
48
- assistant_message = call_nvidia_api(message, history_api, system_message, max_tokens, temperature, top_p)
49
- history_api.append([message, assistant_message])
50
- return assistant_message, history_api
 
51
 
52
- system_msg = gr.Textbox(value=BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
53
- max_tokens = gr.Slider(minimum=20, maximum=1024, label="Max Tokens", step=20, value=1024)
54
- temperature = gr.Slider(minimum=0.0, maximum=1.0, label="Temperature", step=0.1, value=0.2)
55
- top_p = gr.Slider(minimum=0.0, maximum=1.0, label="Top P", step=0.05, value=0.7)
56
 
 
57
  with gr.Blocks() as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  chat_history_state = gr.State([])
59
- chat_interface = gr.ChatInterface(
60
- fn=chatbot_function,
61
- inputs=["message", "history_api", system_msg, max_tokens, temperature, top_p],
62
- outputs=["assistant_message", "history_api"],
63
- title="LLAMA 70B Free Demo",
64
- description="Explore the capabilities of LLAMA 2 70B",
65
- additional_inputs=[system_msg, max_tokens, temperature, top_p]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  )
67
 
68
  demo.launch()
 
15
 
16
  BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."
17
 
18
def clear_chat(chat_history_state, chat_message):
    """Reset the conversation: return an empty history and a blank message box."""
    print("Clearing chat...")
    return [], ''
23
 
24
def user(message, history, system_message=None):
    """Record one user turn in *history*.

    A falsy history (None or empty) starts a fresh list. When a
    system_message is supplied it is inserted before the user turn.
    Returns the updated history list.
    """
    print(f"User message: {message}")
    if not history:
        history = []
    if system_message:
        history.append({"role": "system", "content": system_message})
    history.append({"role": "user", "content": message})
    return history
31
+
32
def call_nvidia_api(history, max_tokens, temperature, top_p):
    """POST the chat *history* to the NVIDIA API and append the assistant reply.

    Args:
        history: list of {"role", "content"} message dicts sent as-is.
        max_tokens / temperature / top_p: sampling parameters for the model.

    Returns:
        The same history list, with the assistant message appended when the
        response contains a non-empty "choices" entry.

    Raises:
        requests.HTTPError: via raise_for_status() on a non-2xx final response.
    """
    payload = {
        "messages": history,
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
        "stream": False
    }

    print(f"Payload enviado: {payload}")  # log outgoing payload

    # Use the session as a context manager so the connection pool is
    # closed deterministically instead of leaking on every call.
    with requests.Session() as session:
        response = session.post(INVOKE_URL, headers=headers, json=payload)

        # 202 means the request is still being processed server-side;
        # poll the fetch endpoint (keyed by NVCF-REQID) until it resolves.
        while response.status_code == 202:
            request_id = response.headers.get("NVCF-REQID")
            fetch_url = FETCH_URL_FORMAT + request_id
            response = session.get(fetch_url, headers=headers)

        response.raise_for_status()
        response_body = response.json()

    print(f"Payload recebido: {response_body}")  # log incoming payload

    # .get() guards against a malformed/error body with no "choices" key,
    # which would otherwise raise KeyError here.
    if response_body.get("choices"):
        assistant_message = response_body["choices"][0]["message"]["content"]
        history.append({"role": "assistant", "content": assistant_message})

    return history
61
 
62
def chat(history, system_message, max_tokens, temperature, top_p):
    """Run one model turn via call_nvidia_api.

    Returns the updated history and an empty string (used by the UI to
    clear the message textbox). system_message is accepted for interface
    compatibility but not used here — the prompt is already in history.
    """
    print("Starting chat...")
    return call_nvidia_api(history, max_tokens, temperature, top_p), ""
66
 
67
# Gradio interface setup
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("LLAMA 70B Free Demo")
            description = """
<div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
    <strong>Explore the Capabilities of LLAMA 70B</strong>
</div>
<p>Code Llama is an LLM capable of generating code, and natural language about code, from both code and natural language prompts.
</p>
<p> <strong>How to Use:</strong></p>
<ol>
    <li>Enter your <strong>message</strong> in the textbox to start a conversation or ask a question.</li>
    <li>Adjust the <strong>Temperature</strong> and <strong>Top P</strong> sliders to control the creativity and diversity of the responses.</li>
    <li>Set the <strong>Max Tokens</strong> slider to determine the length of the response.</li>
    <li>Use the <strong>System Message</strong> textbox if you wish to provide a specific context or instruction for the AI.</li>
    <li>Click <strong>Send message</strong> to submit your query and receive a response from LLAMA70B.</li>
    <li>Press <strong>New topic</strong> to clear the chat history and start a new conversation thread.</li>
</ol>
<p> <strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
<p> <strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
<p> <strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
"""
            gr.Markdown(description)
    chatbot = gr.Chatbot()
    message = gr.Textbox(label="What do you want to chat about?", placeholder="Ask me anything.", lines=3)
    submit = gr.Button(value="Send message")
    clear = gr.Button(value="New topic")
    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
    # FIX: the committed code was missing the comma after 1024, which is a
    # SyntaxError and prevented the app from starting at all.
    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=500, interactive=True)
    temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.7, interactive=True)
    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.95, interactive=True)
    chat_history_state = gr.State([])

    # Bridges the UI widgets to the chat pipeline; receives the live value
    # of system_msg along with the sampling sliders.
    def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
        print("Updating chatbot...")
        # Attach the system prompt only on the very first turn; afterwards
        # the user() helper appends plain user turns.
        if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
            chat_history = user(message, chat_history, system_message if not chat_history else None)
        else:
            chat_history = user(message, chat_history)
        chat_history, _ = chat(chat_history, system_message, max_tokens, temperature, top_p)

        # Re-pair the role-tagged history into [user, assistant] rows for
        # the gr.Chatbot widget (system messages are filtered out).
        formatted_chat_history = []
        for user_msg, assistant_msg in zip(
                [msg["content"].strip() for msg in chat_history if msg["role"] == "user"],
                [msg["content"].strip() for msg in chat_history if msg["role"] == "assistant"]):
            if user_msg or assistant_msg:  # Verify if either message is not empty
                formatted_chat_history.append([user_msg, assistant_msg])

        return formatted_chat_history, chat_history, ""

    submit.click(
        fn=update_chatbot,
        inputs=[message, chat_history_state, system_msg, max_tokens, temperature, top_p],
        outputs=[chatbot, chat_history_state, message]
    )

    clear.click(
        fn=clear_chat,
        inputs=[chat_history_state, message],
        outputs=[chat_history_state, message]
    )

demo.launch()