AMKCode committed on
Commit
7d5c4eb
1 Parent(s): 02b66bf

updated readme template

Browse files
Files changed (1) hide show
  1. app.py +66 -53
app.py CHANGED
@@ -104,6 +104,67 @@ SUPPORTED_MODEL_TYPES = ['llama',
104
  'cohere',
105
  'minicpm']
106
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
 
108
  def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuthToken | None, progress=gr.Progress()):
109
  if oauth_token.token is None:
@@ -180,59 +241,11 @@ def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuth
180
  card.data.tags.append("MLC-Weight-Conversion")
181
  card.data.base_model = hf_model_id
182
 
183
- card.text = dedent(
184
- f"""
185
- # {mlc_model_name}
186
-
187
- This is the [{model_dir_name}]({HF_PATH}{hf_model_id}) model in MLC format `{quantization}`.
188
- The conversion was done using the [MLC-Weight-Conversion](https://huggingface.co/spaces/mlc-ai/MLC-Weight-Conversion) space.
189
- The model can be used for projects [MLC-LLM](https://github.com/mlc-ai/mlc-llm).
190
-
191
- ## Example Usage
192
-
193
- Here are some examples of using this model in MLC LLM.
194
- Before running the examples, please install MLC LLM by following the [installation documentation](https://llm.mlc.ai/docs/install/mlc_llm.html#install-mlc-packages).
195
-
196
- ### Chat
197
-
198
- In command line, run
199
- ```bash
200
- mlc_llm chat HF://{created_repo_id}
201
- ```
202
-
203
- ### REST Server
204
-
205
- In command line, run
206
- ```bash
207
- mlc_llm serve HF://{created_repo_id}
208
- ```
209
-
210
- ### Python API
211
-
212
- ```python
213
- from mlc_llm import MLCEngine
214
-
215
- # Create engine
216
- model = "HF://{created_repo_id}"
217
- engine = MLCEngine(model)
218
-
219
- # Run chat completion in OpenAI API.
220
- for response in engine.chat.completions.create(
221
- messages=[{"role": "user", "content": "What is the meaning of life?"}],
222
- model=model,
223
- stream=True,
224
- ):
225
- for choice in response.choices:
226
- print(choice.delta.content, end="", flush=True)
227
- print("\n")
228
-
229
- engine.terminate()
230
- ```
231
-
232
- ## Documentation
233
-
234
- For more information on MLC LLM project, please visit our [documentation](https://llm.mlc.ai/docs/) and [GitHub repo](http://github.com/mlc-ai/mlc-llm).
235
- """
236
  )
237
  card.save("./dist/README.md")
238
 
 
104
  'cohere',
105
  'minicpm']
106
 
107
+ readme_template = """
108
+ ---
109
+ library_name: mlc-llm
110
+ base_model: {base_model}
111
+ tags:
112
+ - mlc-llm
113
+ - web-llm
114
+ ---
115
+
116
+ # {model_name}
117
+
118
+ This is the [{base_model_name}](https://huggingface.co/{base_model}) model in MLC format `{quant_format}`.
119
+ The conversion was done using the [MLC-Weight-Conversion](https://huggingface.co/spaces/mlc-ai/MLC-Weight-Conversion) space.
120
+ The model can be used for projects [MLC-LLM](https://github.com/mlc-ai/mlc-llm) and [WebLLM](https://github.com/mlc-ai/web-llm).
121
+
122
+ ## Example Usage
123
+
124
+ Here are some examples of using this model in MLC LLM.
125
+ Before running the examples, please install MLC LLM by following the [installation documentation](https://llm.mlc.ai/docs/install/mlc_llm.html#install-mlc-packages).
126
+
127
+ ### Chat
128
+
129
+ In command line, run
130
+ ```bash
131
+ mlc_llm chat HF://mlc-ai/{model_name}
132
+ ```
133
+
134
+ ### REST Server
135
+
136
+ In command line, run
137
+ ```bash
138
+ mlc_llm serve HF://mlc-ai/{model_name}
139
+ ```
140
+
141
+ ### Python API
142
+
143
+ ```python
144
+ from mlc_llm import MLCEngine
145
+
146
+ # Create engine
147
+ model = "HF://mlc-ai/{model_name}"
148
+ engine = MLCEngine(model)
149
+
150
+ # Run chat completion in OpenAI API.
151
+ for response in engine.chat.completions.create(
152
+ messages=[{{"role": "user", "content": "What is the meaning of life?"}}],
153
+ model=model,
154
+ stream=True,
155
+ ):
156
+ for choice in response.choices:
157
+ print(choice.delta.content, end="", flush=True)
158
+ print("\\n")
159
+
160
+ engine.terminate()
161
+ ```
162
+
163
+ ## Documentation
164
+
165
+ For more information on MLC LLM project, please visit our [documentation](https://llm.mlc.ai/docs/) and [GitHub repo](http://github.com/mlc-ai/mlc-llm).
166
+ """.strip()
167
+
168
 
169
  def button_click(hf_model_id, conv_template, quantization, oauth_token: gr.OAuthToken | None, progress=gr.Progress()):
170
  if oauth_token.token is None:
 
241
  card.data.tags.append("MLC-Weight-Conversion")
242
  card.data.base_model = hf_model_id
243
 
244
+ card.text = readme_template.format(
245
+ model_name=f"{user_name}/{mlc_model_name}",
246
+ base_model=hf_model_id,
247
+ base_model_name=model_dir_name,
248
+ quant_format=quantization,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
249
  )
250
  card.save("./dist/README.md")
251