mariusjabami committed on
Commit
09742af
·
verified ·
1 Parent(s): 8729649

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -38
app.py CHANGED
@@ -1,58 +1,103 @@
1
- import os
2
  import gradio as gr
3
  from huggingface_hub import InferenceClient
4
 
5
- client = InferenceClient("lambdaindie/lambdai", token=os.environ["HF_TOKEN"])
 
6
 
7
- def respond(message, history, system_message, max_tokens, temperature, top_p):
8
- messages = [{"role": "system", "content": system_message}] if system_message else []
 
 
 
 
 
 
 
 
9
 
10
- for user, assistant in history:
11
- if user:
12
- messages.append({"role": "user", "content": user})
13
- if assistant:
14
- messages.append({"role": "assistant", "content": assistant})
 
15
 
16
  messages.append({"role": "user", "content": message})
 
17
  response = ""
18
 
19
- for chunk in client.text_generation(
 
20
  messages,
21
- max_new_tokens=max_tokens,
22
  stream=True,
23
  temperature=temperature,
24
  top_p=top_p,
25
  ):
26
- token = chunk.choices[0].delta.content
27
  response += token
28
  yield response
29
 
30
- with gr.Blocks() as demo:
31
- gr.Markdown("# 🧠 lambdai Chat Demo")
32
-
33
- chatbot = gr.Chatbot()
34
- with gr.Row():
35
- system_msg = gr.Textbox(label="System message", placeholder="e.g. You are a helpful assistant.")
36
- with gr.Row():
37
- max_tokens = gr.Slider(1, 2048, value=512, step=1, label="Max tokens")
38
- temperature = gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature")
39
- top_p = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p")
40
- msg = gr.Textbox(placeholder="Ask something...", label="Your message")
41
-
42
- state = gr.State([])
43
-
44
- def user_submit(user_message, history):
45
- return "", history + [[user_message, None]]
46
-
47
- def generate_response(message, history, sys_msg, max_tokens, temperature, top_p):
48
- gen = respond(message, history, sys_msg, max_tokens, temperature, top_p)
49
- return gen, history
50
-
51
- msg.submit(user_submit, [msg, state], [msg, state], queue=False).then(
52
- generate_response,
53
- [msg, state, system_msg, max_tokens, temperature, top_p],
54
- [chatbot, state]
55
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
56
 
57
  if __name__ == "__main__":
58
  demo.launch()
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
# Hugging Face Inference API client for the lambdai model (no auth token supplied).
client = InferenceClient(model="lambdaindie/lambdai")
6
 
7
# Chatbot response generator backed by the Inference API.
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[tuple[str, str]]
        Prior (user, assistant) turns; falsy entries are skipped.
    system_message : str
        System prompt; omitted from the request when empty.
    max_tokens, temperature, top_p
        Sampling parameters forwarded to ``client.chat_completion``.

    Yields
    ------
    str
        The accumulated assistant response after each streamed chunk.
    """
    # Only send a system turn when one was actually provided (an empty
    # string would otherwise be sent as a blank system prompt).
    messages = [{"role": "system", "content": system_message}] if system_message else []

    # Replay the conversation history.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Stream the completion. Use `chunk`, not `message`, as the loop
    # variable so the function parameter is not shadowed.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # Bug fix: the final streamed chunk's delta.content may be None,
        # which previously raised TypeError on `response += token`.
        if token:
            response += token
            yield response
40
 
41
+
42
# Gradio chat UI wired to `respond`; the extra controls below feed the
# trailing parameters of `respond` in order.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="", label="System message", lines=1, placeholder="System message..."),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    # NOTE(review): "huggingface" is not a built-in theme name in recent
    # Gradio releases — confirm against the installed version.
    theme="huggingface",
    title="Lambda Chatbot",
    description="Chatbot alimentado pelo modelo Lambdai",
    # Fix: gr.ChatInterface accepts no `layout` keyword; passing
    # layout="vertical" raises TypeError at startup, so it was removed.
    css="""
    .chatbox {
        background-color: #0e1117;
        color: #f5f5f5;
        font-family: 'JetBrains Mono', monospace;
        border-radius: 8px;
        border: 1px solid #444;
    }
    .gradio-container {
        background-color: #121212;
        padding: 20px;
        border-radius: 10px;
    }
    .gr-button {
        background-color: #4a90e2;
        color: #fff;
        font-family: 'JetBrains Mono', monospace;
        border-radius: 5px;
    }
    .gr-button:hover {
        background-color: #357ab7;
    }
    .gr-slider {
        background-color: #333;
        color: #f5f5f5;
        border-radius: 8px;
    }
    .gr-slider .slider {
        background-color: #444;
    }
    .gr-chatbox-container {
        background-color: #181a1f;
        border-radius: 10px;
    }
    .gr-output {
        font-family: 'JetBrains Mono', monospace;
        color: #f5f5f5;
    }
    """,
)
101
 
102
if __name__ == "__main__":
    # Start the Gradio server only when executed as a script, not on import.
    demo.launch()