MarziehFadaee committed
Commit 34ed765
1 Parent(s): 6b9f47a

Create app.py

Files changed (1)
app.py +141 -0
app.py ADDED
@@ -0,0 +1,141 @@
import gradio as gr
import cohere
import os
import re
import uuid
import secrets


cohere_api_key = os.getenv("COHERE_API_KEY")
co = cohere.Client(cohere_api_key, client_name="huggingface-aya-23")
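# A single Cohere client is shared across sessions; client_name only labels this demo's traffic.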


def trigger_example(example):
    # Only used if example caching is enabled: drain the streaming generator
    # with a fresh conversation id and token, returning the final chat state.
    chat, updated_history = [], []
    for chat, updated_history, _ in generate_response(example, str(uuid.uuid4()), secrets.token_hex(16)):
        pass
    return chat, updated_history


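# Streams a reply for one user message, yielding (chat pairs, flat history, conversation id)
# after every received chunk so the UI updates progressively.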
def generate_response(user_message, cid, token, history=None):

    if not token:
        raise gr.Error("Error loading.")

    if history is None:
        history = []
    if not cid:
        cid = str(uuid.uuid4())

    print(f"cid: {cid} prompt:{user_message}")

    history.append(user_message)

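    # Stream the reply token by token; conversation_id lets the Cohere API keep
    # the multi-turn context for this session on the server side.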
    stream = co.chat_stream(message=user_message, conversation_id=cid, model='c4ai-aya-23', connectors=[], temperature=0.3)
    # stream = co.generate(prompt=user_message, model='c4ai-aya-23')
    output = ""

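    # `history` is a flat list of alternating user/bot turns: the first streamed chunk appends
    # a new bot entry, later chunks overwrite it in place, and `chat` re-pairs the flat list
    # into (user, bot) tuples for gr.Chatbot.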
    for idx, response in enumerate(stream):
        if response.event_type == "text-generation":
            output += response.text
        if idx == 0:
            history.append(" " + output)
        else:
            history[-1] = output
        chat = [
            (history[i].strip(), history[i + 1].strip())
            for i in range(0, len(history) - 1, 2)
        ]
        yield chat, history, cid

    return chat, history, cid


def clear_chat():
    return [], [], str(uuid.uuid4())


examples = [
    "Explain the relativity theory in French",
    "Como sair de um helicóptero que caiu na água?",
    "¿Cómo le explicarías el aprendizaje automático a un extraterrestre?",
    "Explain gravity to a chicken.",
    "Descrivi il processo di creazione di un capolavoro, come se fossi un artista del Rinascimento a Firenze.",
    "Anneme onu ne kadar sevdiğimi anlatan bir mektup yaz",
    "Explique-moi le sens de la vie selon un grand auteur littéraire.",
    "Give me an example of an endangered species and let me know what I can do to help preserve it",
    "یک پاراگراف در مورد زیبایی‌های طبیعت در فصل پاییز بنویس",
    "Wie kann ich lernen, selbstbewusster zu werden?",
    "Formally introduce the transformer architecture with notation.",
]

custom_css = """
#logo-img {
    border: none !important;
}
#chat-message {
    font-size: 14px;
    min-height: 300px;
}
"""

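# Page layout: logo and model description at the top, then the chat window, input box, and controls.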
with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
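    # Per-session state: `cid` holds the Cohere conversation id, `token` is issued on page load.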
    cid = gr.State("")
    token = gr.State(value=None)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("aya-logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
        with gr.Column(scale=3):
            gr.Markdown("""C4AI Aya 23 is an open weights research release of 8 and 35 billion parameter highly advanced instruction fine-tuned models, covering 23 languages: Arabic, Chinese (simplified & traditional), Czech, Dutch, English, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese.
            <br/>
            **Note**: Aya 23 is a single-turn instruction-following model and is not optimized for chat-mode use.
            <br/>
            **Model**: [aya-23-35B](https://huggingface.co/CohereForAI/aya-23-35B)
            <br/>
            **Developed by**: [Cohere for AI](https://cohere.com/research) and [Cohere](https://cohere.com/)
            <br/>
            **License**: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license); use also requires adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
            """
            )

    with gr.Column():
        with gr.Row():
            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)

        with gr.Row():
            user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)

        with gr.Row():
            submit_button = gr.Button("Submit")
            clear_button = gr.Button("Clear chat")

    history = gr.State([])

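    # Pressing Enter and clicking Submit both stream through generate_response;
    # a separate unqueued handler clears the textbox afterwards.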
    user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
    submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)

    clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)

    user_message.submit(lambda: gr.update(value=""), None, [user_message], queue=False)
    submit_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
    clear_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)

    with gr.Row():
        gr.Examples(
            examples=examples,
            inputs=user_message,
            cache_examples=False,
            fn=trigger_example,
            outputs=[chatbot],
            examples_per_page=100,
        )

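    # Issue a fresh random token on every page load; generate_response refuses to run without one.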
    demo.load(lambda: secrets.token_hex(16), None, token)


if __name__ == "__main__":
    # demo.launch(debug=True)
    try:
        demo.queue(api_open=False, max_size=40).launch(show_api=False)
    except Exception as e:
        print(f"Error: {e}")