LuxOAI ybelkada committed
Commit d3a8005
0 Parent(s):

Duplicate from uwnlp/guanaco-playground-tgi

Co-authored-by: Younes Belkada <ybelkada@users.noreply.huggingface.co>

Files changed (6)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +273 -0
  4. dialogue.py +239 -0
  5. requirements.txt +8 -0
  6. share_btn.py +98 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Guanaco Playground Tgi
+ emoji: 📊
+ colorFrom: pink
+ colorTo: green
+ sdk: gradio
+ sdk_version: 3.29.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: uwnlp/guanaco-playground-tgi
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,273 @@
+ import os
+
+ import gradio as gr
+ from huggingface_hub import Repository
+ from text_generation import Client
+
+ # from dialogues import DialogueTemplate
+ from share_btn import (community_icon_html, loading_icon_html, share_btn_css,
+                        share_js)
+
+ HF_TOKEN = os.environ.get("HF_TOKEN", None)
+ API_TOKEN = os.environ.get("API_TOKEN", None)
+ API_URL = os.environ.get(
+     "API_URL", "https://api-inference.huggingface.co/models/timdettmers/guanaco-33b-merged"
+ )
+
+ client = Client(
+     API_URL,
+     headers={"Authorization": f"Bearer {API_TOKEN}"},
+ )
+
+ repo = None
+
+
+ def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep):
+     past = []
+     for data in chatbot:
+         user_data, model_data = data
+
+         if not user_data.startswith(user_name):
+             user_data = user_name + user_data
+         if not model_data.startswith(sep + assistant_name):
+             model_data = sep + assistant_name + model_data
+
+         past.append(user_data + model_data.rstrip() + sep)
+
+     if not inputs.startswith(user_name):
+         inputs = user_name + inputs
+
+     total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip()
+
+     return total_inputs
+
+
+ def has_no_history(chatbot, history):
+     return not chatbot and not history
+
+
+ header = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions."
+ prompt_template = "### Human: {query}\n### Assistant:{response}"
+
+
+ def generate(
+     user_message,
+     chatbot,
+     history,
+     temperature,
+     top_p,
+     max_new_tokens,
+     repetition_penalty,
+ ):
+     # Don't return a meaningless message when the input is empty
+     if not user_message:
+         print("Empty input")
+         yield chatbot, history, user_message, ""
+         return
+
+     history.append(user_message)
+
+     past_messages = []
+     for data in chatbot:
+         user_data, model_data = data
+
+         past_messages.extend(
+             [{"role": "user", "content": user_data}, {"role": "assistant", "content": model_data.rstrip()}]
+         )
+
+     if len(past_messages) < 1:
+         prompt = header + "\n" + prompt_template.format(query=user_message, response="")
+     else:
+         prompt = header
+         for i in range(0, len(past_messages), 2):
+             intermediate_prompt = prompt_template.format(query=past_messages[i]["content"], response=past_messages[i + 1]["content"])
+             prompt = prompt + "\n" + intermediate_prompt
+
+         prompt = prompt + "\n" + prompt_template.format(query=user_message, response="")
+
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         truncate=999,
+         seed=42,
+     )
+
+     stream = client.generate_stream(
+         prompt,
+         **generate_kwargs,
+     )
+
+     output = ""
+     for idx, response in enumerate(stream):
+         if response.token.text == "":
+             break
+
+         if response.token.special:
+             continue
+         output += response.token.text
+         if idx == 0:
+             history.append(" " + output)
+         else:
+             history[-1] = output
+
+         chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
+
+         yield chat, history, user_message, ""
+
+     return chat, history, user_message, ""
+
+
+ examples = [
+     "A llama entered my garden, what should I do?"
+ ]
+
+
+ def clear_chat():
+     return [], []
+
+
+ def process_example(args):
+     # generate() yields 4-tuples; keep only the chat and history
+     for x, y, *_ in generate(args):
+         pass
+     return [x, y]
+
+
+ title = """<h1 align="center">Guanaco Playground 💬</h1>"""
+ custom_css = """
+ #banner-image {
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+ }
+ #chat-message {
+     font-size: 14px;
+     min-height: 300px;
+ }
+ """
+
+ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
+     gr.HTML(title)
+
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown(
+                 """
+                 💻 This demo showcases the Guanaco 33B model, released together with the paper [QLoRA](https://arxiv.org/abs/2305.14314).
+                 """
+             )
+
+     with gr.Row():
+         with gr.Box():
+             output = gr.Markdown()
+             chatbot = gr.Chatbot(elem_id="chat-message", label="Chat")
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             user_message = gr.Textbox(placeholder="Enter your message here", show_label=False, elem_id="q-input")
+             with gr.Row():
+                 send_button = gr.Button("Send", elem_id="send-btn", visible=True)
+
+                 clear_chat_button = gr.Button("Clear chat", elem_id="clear-btn", visible=True)
+
+             with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"):
+                 temperature = gr.Slider(
+                     label="Temperature",
+                     value=0.7,
+                     minimum=0.0,
+                     maximum=1.0,
+                     step=0.1,
+                     interactive=True,
+                     info="Higher values produce more diverse outputs",
+                 )
+                 top_p = gr.Slider(
+                     label="Top-p (nucleus sampling)",
+                     value=0.9,
+                     minimum=0.0,
+                     maximum=1.0,
+                     step=0.05,
+                     interactive=True,
+                     info="Higher values sample more low-probability tokens",
+                 )
+                 max_new_tokens = gr.Slider(
+                     label="Max new tokens",
+                     value=1024,
+                     minimum=0,
+                     maximum=2048,
+                     step=4,
+                     interactive=True,
+                     info="The maximum number of new tokens",
+                 )
+                 repetition_penalty = gr.Slider(
+                     label="Repetition Penalty",
+                     value=1.2,
+                     minimum=0.0,
+                     maximum=10,
+                     step=0.1,
+                     interactive=True,
+                     info="The parameter for repetition penalty. 1.0 means no penalty.",
+                 )
+             with gr.Row():
+                 gr.Examples(
+                     examples=examples,
+                     inputs=[user_message],
+                     cache_examples=False,
+                     fn=process_example,
+                     outputs=[output],
+                 )
+
+     with gr.Row():
+         gr.Markdown(
+             "Disclaimer: The model can produce factually incorrect output and should not be relied on to produce "
+             "factually accurate information. The model was trained on various public datasets; while great efforts "
+             "have been taken to clean the pretraining data, it is possible that this model could generate lewd, "
+             "biased, or otherwise offensive outputs.",
+             elem_classes=["disclaimer"],
+         )
+
+     history = gr.State([])
+     last_user_message = gr.State("")
+
+     user_message.submit(
+         generate,
+         inputs=[
+             user_message,
+             chatbot,
+             history,
+             temperature,
+             top_p,
+             max_new_tokens,
+             repetition_penalty,
+         ],
+         outputs=[chatbot, history, last_user_message, user_message],
+     )
+
+     send_button.click(
+         generate,
+         inputs=[
+             user_message,
+             chatbot,
+             history,
+             temperature,
+             top_p,
+             max_new_tokens,
+             repetition_penalty,
+         ],
+         outputs=[chatbot, history, last_user_message, user_message],
+     )
+
+     clear_chat_button.click(clear_chat, outputs=[chatbot, history])
+
+ demo.queue(concurrency_count=16).launch(debug=True)
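
Note: the sketch below is editorial, not part of this commit. It shows, standalone, the single-turn prompt format that generate() builds and how the same TGI endpoint is streamed with the text_generation client; the query string is made up for illustration.

    import os

    from text_generation import Client

    API_URL = "https://api-inference.huggingface.co/models/timdettmers/guanaco-33b-merged"
    client = Client(API_URL, headers={"Authorization": f"Bearer {os.environ['API_TOKEN']}"})

    header = (
        "A chat between a curious human and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    )
    # Single-turn prompt, formatted the same way as in generate()
    prompt = header + "\n### Human: What is QLoRA?\n### Assistant:"

    output = ""
    for response in client.generate_stream(
        prompt, max_new_tokens=256, temperature=0.7, top_p=0.9, repetition_penalty=1.2, do_sample=True
    ):
        # Skip special tokens, accumulate the rest of the streamed text
        if not response.token.special:
            output += response.token.text
    print(output)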
dialogue.py ADDED
@@ -0,0 +1,239 @@
+ # coding=utf-8
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import os
+ from dataclasses import asdict, dataclass
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional, Type, TypeVar, Union
+
+ from huggingface_hub import ModelHubMixin, hf_hub_download
+
+ # Generic variable that is either ModelHubMixin or a subclass thereof
+ T = TypeVar("T", bound="ModelHubMixin")
+
+ TEMPLATE_FILENAME = "dialogue_template.json"
+ IGNORE_INDEX = -100
+
+
+ @dataclass
+ class DialogueTemplate(ModelHubMixin):
+     """Converts all turns of a dialogue between a user and assistant to a standardized format.
+
+     Adapted from OpenAI's ChatML (https://github.com/openai/openai-python/blob/main/chatml.md) and Vicuna (https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py)
+     """
+
+     system: str
+     messages: List[Dict[str, str]] = None
+     system_token: str = "<|system|>"
+     user_token: str = "<|user|>"
+     assistant_token: str = "<|assistant|>"
+     end_token: str = "<|end|>"
+
+     def get_training_prompt(self) -> str:
+         prompt = self.system_token + "\n" + self.system + self.end_token + "\n"
+         if self.messages is None:
+             raise ValueError("Dialogue template must have at least one message.")
+         for message in self.messages:
+             if message["role"] == "user":
+                 prompt += self.user_token + "\n" + message["content"] + self.end_token + "\n"
+             else:
+                 prompt += self.assistant_token + "\n" + message["content"] + self.end_token + "\n"
+         return prompt
+
+     def get_inference_prompt(self) -> str:
+         prompt = self.system_token + "\n" + self.system + self.end_token + "\n"
+         if self.messages is None:
+             raise ValueError("Dialogue template must have at least one message.")
+         for message in self.messages:
+             if message["role"] == "user":
+                 prompt += self.user_token + "\n" + message["content"] + self.end_token + "\n"
+             else:
+                 prompt += self.assistant_token + "\n" + message["content"] + self.end_token + "\n"
+         prompt += self.assistant_token
+         return prompt
+
+     def get_dialogue(self):
+         """Helper function to format the messages as an easy-to-read dialogue."""
+         prompt = ""
+         if self.messages is None:
+             raise ValueError("Dialogue template must have at least one message.")
+         for message in self.messages:
+             if message["role"] == "user":
+                 prompt += "\n\nHuman: " + message["content"]
+             else:
+                 prompt += "\n\nAssistant: " + message["content"]
+         return prompt
+
+     def get_special_tokens(self) -> List[str]:
+         return [self.system_token, self.user_token, self.assistant_token, self.end_token]
+
+     def copy(self):
+         return DialogueTemplate(
+             system=self.system,
+             messages=self.messages,
+             system_token=self.system_token,
+             user_token=self.user_token,
+             assistant_token=self.assistant_token,
+             end_token=self.end_token,
+         )
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {k: v for k, v in asdict(self).items()}
+
+     @classmethod
+     def from_dict(cls, data):
+         return DialogueTemplate(
+             system=data.get("system", ""),
+             messages=data.get("messages"),
+             system_token=data.get("system_token", "<|system|>"),
+             user_token=data.get("user_token", "<|user|>"),
+             assistant_token=data.get("assistant_token", "<|assistant|>"),
+             end_token=data.get("end_token", "<|end|>"),
+         )
+
+     def _save_pretrained(self, save_directory: Union[str, Path]) -> None:
+         save_directory = Path(save_directory)
+         save_directory.mkdir(exist_ok=True)
+         with open(save_directory / "dialogue_template.json", "w") as f:
+             json.dump(self.to_dict(), f, indent=2)
+
+     @classmethod
+     def _from_pretrained(
+         cls: Type[T],
+         *,
+         model_id: str,
+         revision: Optional[str],
+         cache_dir: Optional[Union[str, Path]],
+         force_download: bool,
+         proxies: Optional[Dict],
+         resume_download: bool,
+         local_files_only: bool,
+         token: Optional[Union[str, bool]],
+         **model_kwargs,
+     ) -> T:
+         """Loads the dialogue template from a local directory or the Hugging Face Hub.
+
+         Args:
+             model_id (`str`):
+                 ID of the model to load from the Hugging Face Hub (e.g. `bigscience/bloom`).
+             revision (`str`, *optional*):
+                 Revision of the model on the Hub. Can be a branch name, a git tag or any commit id. Defaults to the
+                 latest commit on the `main` branch.
+             force_download (`bool`, *optional*, defaults to `False`):
+                 Whether to force (re-)downloading the model weights and configuration files from the Hub, overriding
+                 the existing cache.
+             resume_download (`bool`, *optional*, defaults to `False`):
+                 Whether to delete incompletely received files. Will attempt to resume the download if such a file exists.
+             proxies (`Dict[str, str]`, *optional*):
+                 A dictionary of proxy servers to use by protocol or endpoint (e.g., `{'http': 'foo.bar:3128',
+                 'http://hostname': 'foo.bar:4012'}`).
+             token (`str` or `bool`, *optional*):
+                 The token to use as HTTP bearer authorization for remote files. By default, it will use the token
+                 cached when running `huggingface-cli login`.
+             cache_dir (`str`, `Path`, *optional*):
+                 Path to the folder where cached files are stored.
+             local_files_only (`bool`, *optional*, defaults to `False`):
+                 If `True`, avoid downloading the file and return the path to the local cached file if it exists.
+             model_kwargs:
+                 Additional keyword arguments passed along to the [`~ModelHubMixin._from_pretrained`] method.
+         """
+         if os.path.isdir(model_id):  # Can either be a local directory
+             print("Loading dialogue template from local directory")
+             template_file = os.path.join(model_id, TEMPLATE_FILENAME)
+         else:  # Or a template on the Hub
+             template_file = hf_hub_download(  # Download from the Hub, passing the same input args
+                 repo_id=model_id,
+                 filename=TEMPLATE_FILENAME,
+                 revision=revision,
+                 cache_dir=cache_dir,
+                 force_download=force_download,
+                 proxies=proxies,
+                 resume_download=resume_download,
+                 token=token,
+                 local_files_only=local_files_only,
+             )
+
+         # Load template
+         with open(template_file, "r") as f:
+             data = json.load(f)
+         return cls.from_dict(data=data)
+
+
+ # A shortened version of the system message in Anthropic's HHH prompt: https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt
+ default_template = DialogueTemplate(
+     system="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.",
+ )
+
+ # OpenAI and OpenAssistant train on few to no system messages.
+ # TODO: consider defining this as the `default` template
+ no_system_template = DialogueTemplate(
+     system="",
+ )
+
+ alpaca_template = DialogueTemplate(
+     system="A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.",
+     user_token="### Instruction:",
+     assistant_token="### Response:",
+ )
+
+ SUPPORTED_DIALOGUE_TEMPLATES = {
+     "default": default_template,
+     "no_system": no_system_template,
+     "alpaca": alpaca_template,
+ }
+
+
+ def get_dialogue_template(template: str) -> DialogueTemplate:
+     if template not in SUPPORTED_DIALOGUE_TEMPLATES.keys():
+         raise ValueError(f"Template {template} is not supported!")
+     return SUPPORTED_DIALOGUE_TEMPLATES[template].copy()
+
+
+ def prepare_dialogue(example, dialogue_template, is_train=True):
+     """Format example to single- or multi-turn dialogue."""
+     # TODO: make this simpler by just ensuring every dataset has a messages column
+     if "messages" in example.keys() and example["messages"] is not None:
+         dialogue_template.messages = example["messages"]
+     elif all(k in example.keys() for k in ("prompt", "completion")):
+         # Construct single-turn dialogue from prompt and completion
+         dialogue_template.messages = [
+             {"role": "user", "content": example["prompt"]},
+             {"role": "assistant", "content": example["completion"]},
+         ]
+     elif "prompt" in example.keys():
+         # Construct single-turn dialogue from prompt (inference only)
+         dialogue_template.messages = [
+             {"role": "user", "content": example["prompt"]},
+         ]
+     else:
+         raise ValueError(
+             f"Could not format example as dialogue! Require either `messages` or `[prompt, completion]` or `[prompt]` keys but found {list(example.keys())}"
+         )
+     if is_train:
+         example["text"] = dialogue_template.get_training_prompt()
+     else:
+         example["text"] = dialogue_template.get_inference_prompt()
+     return example
+
+
+ def mask_user_labels(tokenizer, dialogue_template, labels):
+     """Masks the user turns of a dialogue from the loss"""
+     user_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.user_token)
+     assistant_token_id = tokenizer.convert_tokens_to_ids(dialogue_template.assistant_token)
+     for idx, label_id in enumerate(labels):
+         if label_id == user_token_id:
+             current_idx = idx
+             # Check the bound before indexing to avoid an IndexError at the end of the sequence
+             while current_idx < len(labels) and labels[current_idx] != assistant_token_id:
+                 labels[current_idx] = IGNORE_INDEX
+                 current_idx += 1
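
Note: a short usage sketch (editorial, not part of this commit) of the helpers above; the example record is invented for illustration.

    from dialogue import get_dialogue_template, prepare_dialogue

    template = get_dialogue_template("default")
    example = {"prompt": "What is 2 + 2?", "completion": "2 + 2 equals 4."}
    example = prepare_dialogue(example, template, is_train=True)
    print(example["text"])
    # <|system|>
    # A chat between a curious human and an artificial intelligence assistant. ...<|end|>
    # <|user|>
    # What is 2 + 2?<|end|>
    # <|assistant|>
    # 2 + 2 equals 4.<|end|>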
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ einops
+ gradio
+ torch
+ transformers
+ sentencepiece
+ bitsandbytes
+ accelerate
+ text-generation
share_btn.py ADDED
@@ -0,0 +1,98 @@
+ community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
+ <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
+ <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
+ </svg>"""
+
+ loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none; color: #ffffff;" class="animate-spin" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
+
+ share_js = """async () => {
+     async function uploadFile(file){
+         const UPLOAD_URL = 'https://huggingface.co/uploads';
+         const response = await fetch(UPLOAD_URL, {
+             method: 'POST',
+             headers: {
+                 'Content-Type': file.type,
+                 'X-Requested-With': 'XMLHttpRequest',
+             },
+             body: file, // <- File inherits from Blob
+         });
+         const url = await response.text();
+         return url;
+     }
+     async function getInputImgFile(imgEl){
+         const res = await fetch(imgEl.src);
+         const blob = await res.blob();
+         const imgId = Date.now() % 200;
+         const isPng = imgEl.src.startsWith(`data:image/png`);
+         if(isPng){
+             const fileName = `sd-perception-${imgId}.png`;
+             return new File([blob], fileName, { type: 'image/png' });
+         }else{
+             const fileName = `sd-perception-${imgId}.jpg`;
+             return new File([blob], fileName, { type: 'image/jpeg' });
+         }
+     }
+     const gradioEl = document.querySelector("gradio-app");
+     const inputTxt = gradioEl.querySelector('#q-input textarea').value;
+     const outputTxt = gradioEl.querySelector('#q-output').outerHTML;
+     const titleLength = 150;
+     let titleTxt = inputTxt;
+     if(titleTxt.length > titleLength){
+         titleTxt = titleTxt.slice(0, titleLength) + ' ...';
+     }
+     const shareBtnEl = gradioEl.querySelector('#share-btn');
+     const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
+     const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
+     if(!inputTxt || !outputTxt){
+         return;
+     };
+     shareBtnEl.style.pointerEvents = 'none';
+     shareIconEl.style.display = 'none';
+     loadingIconEl.style.removeProperty('display');
+     const descriptionMd = `### Question:
+ ${inputTxt}
+ ### Answer:
+ ${outputTxt}`;
+     const params = {
+         title: titleTxt,
+         description: descriptionMd,
+     };
+     const paramsStr = Object.entries(params)
+         .map(([key, value]) => `${encodeURIComponent(key)}=${encodeURIComponent(value)}`)
+         .join('&');
+     window.open(`https://huggingface.co/spaces/HuggingFaceH4/star-chat-demo/discussions/new?${paramsStr}`, '_blank');
+     shareBtnEl.style.removeProperty('pointer-events');
+     shareIconEl.style.removeProperty('display');
+     loadingIconEl.style.display = 'none';
+ }"""
+
+ share_btn_css = """
+ a {text-decoration-line: underline; font-weight: 600;}
+ .animate-spin {
+     animation: spin 1s linear infinite;
+ }
+ @keyframes spin {
+     from { transform: rotate(0deg); }
+     to { transform: rotate(360deg); }
+ }
+ #share-btn-container {
+     display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+ }
+ #share-btn {
+     all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
+ }
+ #share-btn * {
+     all: unset;
+ }
+ #share-btn-container div:nth-child(-n+2){
+     width: auto !important;
+     min-height: 0px !important;
+ }
+ #share-btn-container .wrap {
+     display: none !important;
+ }
+ """