Tonic committed on
Commit 755e1ba
1 Parent(s): 777730b

Update app.py

Files changed (1)
  1. app.py +23 -194
app.py CHANGED
@@ -6,22 +6,23 @@ import sentencepiece
 from tokenization_yi import YiTokenizer
 
 
+from transformers import AutoModelForCausalLM, GPTQConfig, AutoTokenizer, AutoModelForCausalLM
+import torch
+import os
+import gradio as gr
+import sentencepiece
+from tokenization_yi import YiTokenizer
+
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:50'
 model_id = "TheBloke/Yi-34B-200K-Llamafied-GPTQ"
 
-gptq_config = GPTQConfig( bits=4, exllama_config={"version": 2})
-tokenizer = YiTokenizer.from_pretrained("./")
-model = AutoModelForCausalLM.from_pretrained( model_id, device_map="auto", torch_dtype="auto", trust_remote_code=True, quantization_config=gptq_config)
+gptq_config = GPTQConfig(bits=4, exllama_config={"version": 2})
+tokenizer = YiTokenizer.from_pretrained("./") #self-tokenizer method
+model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto", trust_remote_code=True, quantization_config=gptq_config)
 
-def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
-    prompt = get_prompt(message, chat_history)
+def predict(message, max_new_tokens=4056, temperature=3.5, top_p=0.9, top_k=800):
+    prompt = message.strip()
     input_ids = tokenizer.encode(prompt, return_tensors='pt')
-
-    print("Input IDs:", input_ids) # Debug print
-    print("Input IDs shape:", input_ids.shape) # Debug print
-    if input_ids.shape[1] == 0:
-        raise ValueError("The input is empty after tokenization.")
-
     input_ids = input_ids.to(model.device)
     response_ids = model.generate(
         input_ids,
@@ -30,206 +31,34 @@ def run(message, chat_history, max_new_tokens=4056, temperature=3.5, top_p=0.9,
         top_p=top_p,
         top_k=top_k,
         pad_token_id=tokenizer.eos_token_id,
-        do_sample=True
-
+        do_sample=True
     )
-
     response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
     return response
 
-def get_prompt(message, chat_history):
-    texts = []
-
-    do_strip = False
-    for user_input, response in chat_history:
-        user_input = user_input.strip() if do_strip else user_input
-        do_strip = True
-        texts.append(f" {response.strip()} {user_input} ")
-    message = message.strip() if do_strip else message
-    texts.append(f"{message}")
-    return ''.join(texts)
-
 DESCRIPTION = """
 # 👋🏻Welcome to 🙋🏻‍♂️Tonic's🧑🏻‍🚀YI-200K🚀"
 You can use this Space to test out the current model [Tonic/YI](https://huggingface.co/01-ai/Yi-34B)
 You can also use 🧑🏻‍🚀YI-200K🚀 by cloning this space. 🧬🔬🔍 Simply click here: <a style="display:inline-block" href="https://huggingface.co/spaces/Tonic1/YiTonic?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></h3>
 Join us : 🌟TeamTonic🌟 is always making cool demos! Join our active builder's🛠️community on 👻Discord: [Discord](https://discord.gg/nXx5wbX9) On 🤗Huggingface: [TeamTonic](https://huggingface.co/TeamTonic) & [MultiTransformer](https://huggingface.co/MultiTransformer) On 🌐Github: [Polytonic](https://github.com/tonic-ai) & contribute to 🌟 [PolyGPT](https://github.com/tonic-ai/polygpt-alpha)
 """
-
-MAX_MAX_NEW_TOKENS = 4056
-DEFAULT_MAX_NEW_TOKENS = 1256
-MAX_INPUT_TOKEN_LENGTH = 120000
-
-def clear_and_save_textbox(message): return '', message
-
-def display_input(message, history=[]):
-    history.append((message, ''))
-    return history
-
-def delete_prev_fn(history=[]):
-    try:
-        message, _ = history.pop()
-    except IndexError:
-        message = ''
-    return history, message or ''
-
-def generate(message, history_with_input, max_new_tokens, temperature, top_p, top_k):
-    if int(max_new_tokens) > MAX_MAX_NEW_TOKENS:
-        raise ValueError
-
-    history = history_with_input[:-1]
-    response = run(message, history, max_new_tokens, temperature, top_p, top_k)
-    yield history + [(message, response)]
-
-
-def process_example(message):
-    generator = generate(message, [], 1024, 2.5, 0.95, 900)
-    for x in generator:
-        pass
-    return '', x
-
-def check_input_token_length(message, chat_history):
-    input_token_length = len(message) + len(chat_history)
-    if input_token_length > MAX_INPUT_TOKEN_LENGTH:
-        raise gr.Error(f"The accumulated input is too long ({input_token_length} > {MAX_INPUT_TOKEN_LENGTH}). Clear your chat history and try again.")
-
 with gr.Blocks(theme='ParityError/Anime') as demo:
     gr.Markdown(DESCRIPTION)
-
-
-
     with gr.Group():
+        textbox = gr.Textbox(placeholder='Enter your message here', label='Your Message', lines=2)
+        submit_button = gr.Button('Submit', variant='primary')
         chatbot = gr.Chatbot(label='TonicYi-30B-200K')
-        with gr.Row():
-            textbox = gr.Textbox(
-                container=False,
-                show_label=False,
-                placeholder='As the dawn approached, they leant in and said',
-                scale=10
-            )
-            submit_button = gr.Button('Submit', variant='primary', scale=1, min_width=0)
-
-    with gr.Row():
-        retry_button = gr.Button('Retry', variant='secondary')
-        undo_button = gr.Button('Undo', variant='secondary')
-        clear_button = gr.Button('Clear', variant='secondary')
-
-    saved_input = gr.State()
 
     with gr.Accordion(label='Advanced options', open=False):
-        # system_prompt = gr.Textbox(label='System prompt', value=DEFAULT_SYSTEM_PROMPT, lines=5, interactive=False)
-        max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
-        temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=0.1)
+        max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=2056, step=1, value=980)
+        temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=1.2)
         top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
-        top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=10)
-
-    textbox.submit(
-        fn=clear_and_save_textbox,
-        inputs=textbox,
-        outputs=[textbox, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=display_input,
-        inputs=[saved_input, chatbot],
-        outputs=chatbot,
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=check_input_token_length,
-        inputs=[saved_input, chatbot],
-        api_name=False,
-        queue=False,
-    ).success(
-        fn=generate,
-        inputs=[
-            saved_input,
-            chatbot,
-            max_new_tokens,
-            temperature,
-            top_p,
-            top_k,
-        ],
-        outputs=chatbot,
-        api_name="Generate",
-    )
-
-    button_event_preprocess = submit_button.click(
-        fn=clear_and_save_textbox,
-        inputs=textbox,
-        outputs=[textbox, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=display_input,
-        inputs=[saved_input, chatbot],
-        outputs=chatbot,
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=check_input_token_length,
-        inputs=[saved_input, chatbot],
-        api_name=False,
-        queue=False,
-    ).success(
-        fn=generate,
-        inputs=[
-            saved_input,
-            chatbot,
-            max_new_tokens,
-            temperature,
-            top_p,
-            top_k,
-        ],
-        outputs=chatbot,
-        api_name="Cgenerate",
-    )
-
-    retry_button.click(
-        fn=delete_prev_fn,
-        inputs=chatbot,
-        outputs=[chatbot, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=display_input,
-        inputs=[saved_input, chatbot],
-        outputs=chatbot,
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=generate,
-        inputs=[
-            saved_input,
-            chatbot,
-            max_new_tokens,
-            temperature,
-            top_p,
-            top_k,
-        ],
-        outputs=chatbot,
-        api_name=False,
-    )
-
-    undo_button.click(
-        fn=delete_prev_fn,
-        inputs=chatbot,
-        outputs=[chatbot, saved_input],
-        api_name=False,
-        queue=False,
-    ).then(
-        fn=lambda x: x,
-        inputs=[saved_input],
-        outputs=textbox,
-        api_name=False,
-        queue=False,
-    )
+        top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=900)
 
-    clear_button.click(
-        fn=lambda: ([], ''),
-        outputs=[chatbot, saved_input],
-        queue=False,
-        api_name=False,
+    submit_button.click(
+        fn=predict,
+        inputs=[textbox, max_new_tokens, temperature, top_p, top_k],
+        outputs=chatbot
     )
 
-demo.queue().launch(show_api=True)
+demo.launch(concurrency_limit=5)
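
In the updated `predict`, `model.generate` returns the prompt tokens followed by the completion, so the decode step slices off the first `input_ids.shape[-1]` columns before calling `tokenizer.decode`. A minimal, self-contained sketch of that prompt-stripping step (plain tensors, no model or GPU needed; the token values are made up):

```python
import torch

def strip_prompt(response_ids: torch.Tensor, prompt_len: int) -> torch.Tensor:
    # generate() output has shape (batch, prompt_len + new_tokens); keeping
    # only the columns past prompt_len leaves just the generated continuation.
    return response_ids[:, prompt_len:]

full = torch.tensor([[101, 2009, 2003, 7592, 999]])  # 3 prompt tokens + 2 new ones
print(strip_prompt(full, prompt_len=3))  # tensor([[7592, 999]])
```

Decoding only this slice with `skip_special_tokens=True` is what keeps the echoed prompt out of the chat response.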
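One wrinkle in the new wiring worth flagging: `submit_button.click(..., outputs=chatbot)` hands `gr.Chatbot` the bare string returned by `predict`, whereas the Chatbot component renders a list of (user, assistant) message pairs. A hypothetical adapter of this shape (the name `as_chatbot_turn` and the stand-in lambda are illustrative, not from the commit) would keep the handler's output aligned with the component:

```python
from typing import Callable, List, Tuple

def as_chatbot_turn(predict_fn: Callable[..., str], message: str, *gen_args) -> List[Tuple[str, str]]:
    # Wrap a string-returning generation function into the single-turn
    # [(user, assistant)] history format that gr.Chatbot displays.
    response = predict_fn(message, *gen_args)
    return [(message, response)]

# Stand-in for the real predict(): uppercases the message instead of calling the model.
print(as_chatbot_turn(lambda msg, *_: msg.upper(), "hello"))  # [('hello', 'HELLO')]
```

Passing `fn=lambda *args: as_chatbot_turn(predict, *args)` in place of `fn=predict` would then give the Chatbot the pair format it expects.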