Abalsia2 committed on
Commit 3e38f63
1 Parent(s): 38fa2d4

Update app.py

Files changed (1)
  1. app.py +111 -49
app.py CHANGED
@@ -1,63 +1,125 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
  """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
  if __name__ == "__main__":
-     demo.launch()

  import gradio as gr
+ import cohere
+ import os
+ import re
+ import uuid
+ import secrets
+
+
+ # The Cohere API key is read from an environment variable (the variable name here is an assumption).
+ cohere_api_key = os.getenv("COHERE_API_KEY")
+ co = cohere.Client(cohere_api_key, client_name="huggingface-rp")
+
+
+ def trigger_example(example):
+     chat, updated_history = generate_response(example)
+     return chat, updated_history
+
+ def generate_response(user_message, cid, token, history=None):
+     if not token:
+         raise gr.Error("Error loading.")
+
+     if history is None:
+         history = []
+     if cid == "" or cid is None:
+         cid = str(uuid.uuid4())
+
+     print(f"cid: {cid} prompt:{user_message}")
+
+     history.append(user_message)
+
+     stream = co.chat_stream(message=user_message, conversation_id=cid, model='command-r-plus', connectors=[], temperature=0.3)
+
+     output = ""
+
+     for idx, response in enumerate(stream):
+         if response.event_type == "text-generation":
+             output += response.text
+         if idx == 0:
+             history.append(" " + output)
+         else:
+             history[-1] = output
+         chat = [
+             (history[i].strip(), history[i + 1].strip())
+             for i in range(0, len(history) - 1, 2)
+         ]
+         yield chat, history, cid
+
+     return chat, history, cid
+
+ def clear_chat():
+     return [], [], str(uuid.uuid4())
+
+
+ examples = [
+     "Pouvez-vous expliquer les règles relatives aux heures supplémentaires selon le Code du travail ?",
+     "Quels sont les droits et obligations concernant les congés payés selon le Code du travail ?",
+     "Quelles sont les conditions requises pour un licenciement pour motif économique en vertu du Code du travail ?",
+     "Quelles sont les mesures prévues par le Code du travail en cas de harcèlement au travail ?",
+ ]
+
+ custom_css = """
+ #logo-img {
+     border: none !important;
+ }
+ #chat-message {
+     font-size: 14px;
+     min-height: 300px;
+ }
  """
+
+ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
+     cid = gr.State("")
+     token = gr.State(value=None)
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             #gr.Image("logoplus.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
+             pass  # placeholder so the commented-out logo leaves a valid block
+         with gr.Column(scale=3):
+             #gr.Markdown(""" """)
+             pass  # placeholder so the commented-out header leaves a valid block
+
+     with gr.Column():
+         with gr.Row():
+             chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
+
+         with gr.Row():
+             user_message = gr.Textbox(lines=1, placeholder="Question ...", label="Input", show_label=False)
+
+         with gr.Row():
+             submit_button = gr.Button("Envoyer")
+             clear_button = gr.Button("Clear chat")
+
+         history = gr.State([])
+
+         user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
+         submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
+
+         clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)
+
+         user_message.submit(lambda: gr.update(value=""), None, [user_message], queue=False)
+         submit_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
+         clear_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
+
+         with gr.Row():
+             gr.Examples(
+                 examples=examples,
+                 inputs=user_message,
+                 cache_examples=False,
+                 fn=trigger_example,
+                 outputs=[chatbot],
+                 examples_per_page=100
+             )
+
+     demo.load(lambda: secrets.token_hex(16), None, token)
+
  if __name__ == "__main__":
+     # demo.launch(debug=True)
+     try:
+         demo.queue(api_open=False, max_size=40).launch(show_api=False)
+     except Exception as e:
+         print(f"Error: {e}")