Tom Aarsen committed on
Commit
777b2a0
1 Parent(s): adedf8e

Propagate token through to all ST/HF_hub functions

Browse files
Files changed (2) hide show
  1. README.md +4 -0
  2. app.py +70 -42
README.md CHANGED
@@ -10,6 +10,10 @@ pinned: false
10
  license: apache-2.0
11
  short_description: Export Sentence Transformer models to accelerated backends
12
  hf_oauth: true
 
 
 
 
13
  ---
14
 
15
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
10
  license: apache-2.0
11
  short_description: Export Sentence Transformer models to accelerated backends
12
  hf_oauth: true
13
+ hf_oauth_scopes:
14
+ - manage-repos
15
+ - write-repos
16
+ - write-discussions
17
  ---
18
 
19
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,15 +1,17 @@
1
  from enum import Enum
 
2
  from pathlib import Path
3
- from typing import Tuple
4
  import gradio as gr
5
  from gradio_huggingfacehub_search import HuggingfaceHubSearch
 
6
  from sentence_transformers import SentenceTransformer
7
  from sentence_transformers import (
8
  export_dynamic_quantized_onnx_model as st_export_dynamic_quantized_onnx_model,
9
  export_optimized_onnx_model as st_export_optimized_onnx_model,
10
  export_static_quantized_openvino_model as st_export_static_quantized_openvino_model,
11
  )
12
- from huggingface_hub import model_info, upload_folder, whoami, get_repo_discussions, list_repo_commits, HfFileSystem
13
  from huggingface_hub.errors import RepositoryNotFoundError
14
  from optimum.intel import OVQuantizationConfig
15
  from tempfile import TemporaryDirectory
@@ -78,7 +80,7 @@ def export_to_torch(model_id, create_pr, output_model_id):
78
  )
79
 
80
 
81
- def export_to_onnx(model_id: str, create_pr: bool, output_model_id: str):
82
  if does_file_glob_exist(output_model_id, "**/model.onnx"):
83
  raise FileExistsError("An ONNX model already exists in the repository")
84
 
@@ -91,6 +93,7 @@ def export_to_onnx(model_id: str, create_pr: bool, output_model_id: str):
91
  repo_id=output_model_id,
92
  commit_message=commit_message,
93
  create_pr=create_pr,
 
94
  )
95
  else:
96
  with TemporaryDirectory() as tmp_dir:
@@ -133,6 +136,7 @@ print(similarities)
133
  commit_message=commit_message,
134
  commit_description=commit_description if create_pr else None,
135
  create_pr=create_pr,
 
136
  )
137
 
138
  def export_to_onnx_snippet(model_id: str, create_pr: bool, output_model_id: str) -> str:
@@ -175,7 +179,7 @@ similarities = model.similarity(embeddings, embeddings)
175
 
176
 
177
  def export_to_onnx_dynamic_quantization(
178
- model_id: str, create_pr: bool, output_model_id: str, onnx_quantization_config: str
179
  ) -> None:
180
  if does_file_glob_exist(output_model_id, f"onnx/model_qint8_{onnx_quantization_config}.onnx"):
181
  raise FileExistsError("The quantized ONNX model already exists in the repository")
@@ -183,8 +187,11 @@ def export_to_onnx_dynamic_quantization(
183
  model = SentenceTransformer(model_id, backend="onnx")
184
 
185
  if not create_pr and is_new_model(output_model_id):
186
- model.push_to_hub(repo_id=output_model_id)
187
 
 
 
 
188
  try:
189
  st_export_dynamic_quantized_onnx_model(
190
  model,
@@ -203,6 +210,8 @@ def export_to_onnx_dynamic_quantization(
203
  push_to_hub=True,
204
  create_pr=create_pr,
205
  )
 
 
206
 
207
  def export_to_onnx_dynamic_quantization_snippet(
208
  model_id: str, create_pr: bool, output_model_id: str, onnx_quantization_config: str
@@ -249,22 +258,28 @@ embeddings = model.encode(["The weather is lovely today.", "It's so sunny outsid
249
  similarities = model.similarity(embeddings, embeddings)
250
  """
251
 
252
- def export_to_onnx_optimization(model_id: str, create_pr: bool, output_model_id: str, onnx_optimization_config: str) -> None:
253
  if does_file_glob_exist(output_model_id, f"onnx/model_{onnx_optimization_config}.onnx"):
254
  raise FileExistsError("The optimized ONNX model already exists in the repository")
255
 
256
  model = SentenceTransformer(model_id, backend="onnx")
257
 
258
  if not create_pr and is_new_model(output_model_id):
259
- model.push_to_hub(repo_id=output_model_id)
260
 
261
- st_export_optimized_onnx_model(
262
- model,
263
- optimization_config=onnx_optimization_config,
264
- model_name_or_path=output_model_id,
265
- push_to_hub=True,
266
- create_pr=create_pr,
267
- )
 
 
 
 
 
 
268
 
269
  def export_to_onnx_optimization_snippet(model_id: str, create_pr: bool, output_model_id: str, onnx_optimization_config: str) -> str:
270
  return """\
@@ -310,7 +325,7 @@ similarities = model.similarity(embeddings, embeddings)
310
  """
311
 
312
 
313
- def export_to_openvino(model_id: str, create_pr: bool, output_model_id: str) -> None:
314
  if does_file_glob_exist(output_model_id, "**/openvino_model.xml"):
315
  raise FileExistsError("The OpenVINO model already exists in the repository")
316
 
@@ -323,6 +338,7 @@ def export_to_openvino(model_id: str, create_pr: bool, output_model_id: str) ->
323
  repo_id=output_model_id,
324
  commit_message=commit_message,
325
  create_pr=create_pr,
 
326
  )
327
  else:
328
  with TemporaryDirectory() as tmp_dir:
@@ -365,6 +381,7 @@ print(similarities)
365
  commit_message=commit_message,
366
  commit_description=commit_description if create_pr else None,
367
  create_pr=create_pr,
 
368
  )
369
 
370
  def export_to_openvino_snippet(model_id: str, create_pr: bool, output_model_id: str) -> str:
@@ -412,6 +429,7 @@ def export_to_openvino_static_quantization(
412
  ov_quant_dataset_split: str,
413
  ov_quant_dataset_column_name: str,
414
  ov_quant_dataset_num_samples: int,
 
415
  ) -> None:
416
  if does_file_glob_exist(output_model_id, "openvino/openvino_model_qint8_quantized.xml"):
417
  raise FileExistsError("The quantized OpenVINO model already exists in the repository")
@@ -419,21 +437,27 @@ def export_to_openvino_static_quantization(
419
  model = SentenceTransformer(model_id, backend="openvino")
420
 
421
  if not create_pr and is_new_model(output_model_id):
422
- model.push_to_hub(repo_id=output_model_id)
423
-
424
- st_export_static_quantized_openvino_model(
425
- model,
426
- quantization_config=OVQuantizationConfig(
427
- num_samples=ov_quant_dataset_num_samples,
428
- ),
429
- model_name_or_path=output_model_id,
430
- dataset_name=ov_quant_dataset_name,
431
- dataset_config_name=ov_quant_dataset_subset,
432
- dataset_split=ov_quant_dataset_split,
433
- column_name=ov_quant_dataset_column_name,
434
- push_to_hub=True,
435
- create_pr=create_pr,
436
- )
 
 
 
 
 
 
437
 
438
  def export_to_openvino_static_quantization_snippet(
439
  model_id: str,
@@ -505,7 +529,11 @@ def on_submit(
505
  ov_quant_dataset_column_name,
506
  ov_quant_dataset_num_samples,
507
  inference_snippet: str,
 
 
508
  ):
 
 
509
 
510
  if not model_id:
511
  return "Commit or PR url:<br>...", inference_snippet, gr.Textbox("Please enter a model ID", visible=True)
@@ -514,26 +542,23 @@ def on_submit(
514
  return "Commit or PR url:<br>...", inference_snippet, gr.Textbox("The source model must have a Sentence Transformers tag", visible=True)
515
 
516
  if output_model_id and "/" not in output_model_id:
517
- try:
518
- output_model_id = f"{whoami()['name']}/{output_model_id}"
519
- except Exception:
520
- return "Commit or PR url:<br>...", inference_snippet, gr.Textbox("You might be signed in with Hugging Face to use this Space", visible=True)
521
 
522
  output_model_id = output_model_id if not create_pr else model_id
523
 
524
  try:
525
  if backend == Backend.ONNX.value:
526
- export_to_onnx(model_id, create_pr, output_model_id)
527
  elif backend == Backend.ONNX_DYNAMIC_QUANTIZATION.value:
528
  export_to_onnx_dynamic_quantization(
529
- model_id, create_pr, output_model_id, onnx_quantization_config
530
  )
531
  elif backend == Backend.ONNX_OPTIMIZATION.value:
532
  export_to_onnx_optimization(
533
- model_id, create_pr, output_model_id, onnx_optimization_config
534
  )
535
  elif backend == Backend.OPENVINO.value:
536
- export_to_openvino(model_id, create_pr, output_model_id)
537
  elif backend == Backend.OPENVINO_STATIC_QUANTIZATION.value:
538
  export_to_openvino_static_quantization(
539
  model_id,
@@ -544,6 +569,7 @@ def on_submit(
544
  ov_quant_dataset_split,
545
  ov_quant_dataset_column_name,
546
  ov_quant_dataset_num_samples,
 
547
  )
548
  except FileExistsError as exc:
549
  return "Commit or PR url:<br>...", inference_snippet, gr.Textbox(str(exc), visible=True)
@@ -572,15 +598,17 @@ def on_change(
572
  ov_quant_dataset_split,
573
  ov_quant_dataset_column_name,
574
  ov_quant_dataset_num_samples,
 
 
575
  ) -> str:
 
 
 
576
  if not model_id:
577
  return "", "", "", gr.Textbox("Please enter a model ID", visible=True)
578
 
579
  if output_model_id and "/" not in output_model_id:
580
- try:
581
- output_model_id = f"{whoami()['name']}/{output_model_id}"
582
- except Exception:
583
- return "", "", "", gr.Textbox("You might be signed in with Hugging Face to use this Space", visible=True)
584
 
585
  output_model_id = output_model_id if not create_pr else model_id
586
 
 
1
  from enum import Enum
2
+ from functools import partial
3
  from pathlib import Path
4
+ from typing import Optional, Tuple
5
  import gradio as gr
6
  from gradio_huggingfacehub_search import HuggingfaceHubSearch
7
+ import huggingface_hub
8
  from sentence_transformers import SentenceTransformer
9
  from sentence_transformers import (
10
  export_dynamic_quantized_onnx_model as st_export_dynamic_quantized_onnx_model,
11
  export_optimized_onnx_model as st_export_optimized_onnx_model,
12
  export_static_quantized_openvino_model as st_export_static_quantized_openvino_model,
13
  )
14
+ from huggingface_hub import model_info, upload_folder, get_repo_discussions, list_repo_commits, HfFileSystem
15
  from huggingface_hub.errors import RepositoryNotFoundError
16
  from optimum.intel import OVQuantizationConfig
17
  from tempfile import TemporaryDirectory
 
80
  )
81
 
82
 
83
+ def export_to_onnx(model_id: str, create_pr: bool, output_model_id: str, token: Optional[str] = None) -> None:
84
  if does_file_glob_exist(output_model_id, "**/model.onnx"):
85
  raise FileExistsError("An ONNX model already exists in the repository")
86
 
 
93
  repo_id=output_model_id,
94
  commit_message=commit_message,
95
  create_pr=create_pr,
96
+ token=token,
97
  )
98
  else:
99
  with TemporaryDirectory() as tmp_dir:
 
136
  commit_message=commit_message,
137
  commit_description=commit_description if create_pr else None,
138
  create_pr=create_pr,
139
+ token=token,
140
  )
141
 
142
  def export_to_onnx_snippet(model_id: str, create_pr: bool, output_model_id: str) -> str:
 
179
 
180
 
181
  def export_to_onnx_dynamic_quantization(
182
+ model_id: str, create_pr: bool, output_model_id: str, onnx_quantization_config: str, token: Optional[str] = None
183
  ) -> None:
184
  if does_file_glob_exist(output_model_id, f"onnx/model_qint8_{onnx_quantization_config}.onnx"):
185
  raise FileExistsError("The quantized ONNX model already exists in the repository")
 
187
  model = SentenceTransformer(model_id, backend="onnx")
188
 
189
  if not create_pr and is_new_model(output_model_id):
190
+ model.push_to_hub(repo_id=output_model_id, token=token)
191
 
192
+ # Monkey-patch the upload_folder function to include the token, as it's not used in export_dynamic_quantized_onnx_model
193
+ original_upload_folder = huggingface_hub.upload_folder
194
+ huggingface_hub.upload_folder = partial(original_upload_folder, token=token)
195
  try:
196
  st_export_dynamic_quantized_onnx_model(
197
  model,
 
210
  push_to_hub=True,
211
  create_pr=create_pr,
212
  )
213
+ finally:
214
+ huggingface_hub.upload_folder = original_upload_folder
215
 
216
  def export_to_onnx_dynamic_quantization_snippet(
217
  model_id: str, create_pr: bool, output_model_id: str, onnx_quantization_config: str
 
258
  similarities = model.similarity(embeddings, embeddings)
259
  """
260
 
261
+ def export_to_onnx_optimization(model_id: str, create_pr: bool, output_model_id: str, onnx_optimization_config: str, token: Optional[str] = None) -> None:
262
  if does_file_glob_exist(output_model_id, f"onnx/model_{onnx_optimization_config}.onnx"):
263
  raise FileExistsError("The optimized ONNX model already exists in the repository")
264
 
265
  model = SentenceTransformer(model_id, backend="onnx")
266
 
267
  if not create_pr and is_new_model(output_model_id):
268
+ model.push_to_hub(repo_id=output_model_id, token=token)
269
 
270
+ # Monkey-patch the upload_folder function to include the token, as it's not used in export_optimized_onnx_model
271
+ original_upload_folder = huggingface_hub.upload_folder
272
+ huggingface_hub.upload_folder = partial(original_upload_folder, token=token)
273
+ try:
274
+ st_export_optimized_onnx_model(
275
+ model,
276
+ optimization_config=onnx_optimization_config,
277
+ model_name_or_path=output_model_id,
278
+ push_to_hub=True,
279
+ create_pr=create_pr,
280
+ )
281
+ finally:
282
+ huggingface_hub.upload_folder = original_upload_folder
283
 
284
  def export_to_onnx_optimization_snippet(model_id: str, create_pr: bool, output_model_id: str, onnx_optimization_config: str) -> str:
285
  return """\
 
325
  """
326
 
327
 
328
+ def export_to_openvino(model_id: str, create_pr: bool, output_model_id: str, token: Optional[str] = None) -> None:
329
  if does_file_glob_exist(output_model_id, "**/openvino_model.xml"):
330
  raise FileExistsError("The OpenVINO model already exists in the repository")
331
 
 
338
  repo_id=output_model_id,
339
  commit_message=commit_message,
340
  create_pr=create_pr,
341
+ token=token,
342
  )
343
  else:
344
  with TemporaryDirectory() as tmp_dir:
 
381
  commit_message=commit_message,
382
  commit_description=commit_description if create_pr else None,
383
  create_pr=create_pr,
384
+ token=token,
385
  )
386
 
387
  def export_to_openvino_snippet(model_id: str, create_pr: bool, output_model_id: str) -> str:
 
429
  ov_quant_dataset_split: str,
430
  ov_quant_dataset_column_name: str,
431
  ov_quant_dataset_num_samples: int,
432
+ token: Optional[str] = None,
433
  ) -> None:
434
  if does_file_glob_exist(output_model_id, "openvino/openvino_model_qint8_quantized.xml"):
435
  raise FileExistsError("The quantized OpenVINO model already exists in the repository")
 
437
  model = SentenceTransformer(model_id, backend="openvino")
438
 
439
  if not create_pr and is_new_model(output_model_id):
440
+ model.push_to_hub(repo_id=output_model_id, token=token)
441
+
442
+ # Monkey-patch the upload_folder function to include the token, as it's not used in export_static_quantized_openvino_model
443
+ original_upload_folder = huggingface_hub.upload_folder
444
+ huggingface_hub.upload_folder = partial(original_upload_folder, token=token)
445
+ try:
446
+ st_export_static_quantized_openvino_model(
447
+ model,
448
+ quantization_config=OVQuantizationConfig(
449
+ num_samples=ov_quant_dataset_num_samples,
450
+ ),
451
+ model_name_or_path=output_model_id,
452
+ dataset_name=ov_quant_dataset_name,
453
+ dataset_config_name=ov_quant_dataset_subset,
454
+ dataset_split=ov_quant_dataset_split,
455
+ column_name=ov_quant_dataset_column_name,
456
+ push_to_hub=True,
457
+ create_pr=create_pr,
458
+ )
459
+ finally:
460
+ huggingface_hub.upload_folder = original_upload_folder
461
 
462
  def export_to_openvino_static_quantization_snippet(
463
  model_id: str,
 
529
  ov_quant_dataset_column_name,
530
  ov_quant_dataset_num_samples,
531
  inference_snippet: str,
532
+ oauth_token: Optional[gr.OAuthToken] = None,
533
+ profile: Optional[gr.OAuthProfile] = None,
534
  ):
535
+ if oauth_token is None or profile is None:
536
+ return "Commit or PR url:<br>...", inference_snippet, gr.Textbox("Please sign in with Hugging Face to use this Space", visible=True)
537
 
538
  if not model_id:
539
  return "Commit or PR url:<br>...", inference_snippet, gr.Textbox("Please enter a model ID", visible=True)
 
542
  return "Commit or PR url:<br>...", inference_snippet, gr.Textbox("The source model must have a Sentence Transformers tag", visible=True)
543
 
544
  if output_model_id and "/" not in output_model_id:
545
+ output_model_id = f"{profile.name}/{output_model_id}"
 
 
 
546
 
547
  output_model_id = output_model_id if not create_pr else model_id
548
 
549
  try:
550
  if backend == Backend.ONNX.value:
551
+ export_to_onnx(model_id, create_pr, output_model_id, token=oauth_token.token)
552
  elif backend == Backend.ONNX_DYNAMIC_QUANTIZATION.value:
553
  export_to_onnx_dynamic_quantization(
554
+ model_id, create_pr, output_model_id, onnx_quantization_config, token=oauth_token.token
555
  )
556
  elif backend == Backend.ONNX_OPTIMIZATION.value:
557
  export_to_onnx_optimization(
558
+ model_id, create_pr, output_model_id, onnx_optimization_config, token=oauth_token.token
559
  )
560
  elif backend == Backend.OPENVINO.value:
561
+ export_to_openvino(model_id, create_pr, output_model_id, token=oauth_token.token)
562
  elif backend == Backend.OPENVINO_STATIC_QUANTIZATION.value:
563
  export_to_openvino_static_quantization(
564
  model_id,
 
569
  ov_quant_dataset_split,
570
  ov_quant_dataset_column_name,
571
  ov_quant_dataset_num_samples,
572
+ token=oauth_token.token,
573
  )
574
  except FileExistsError as exc:
575
  return "Commit or PR url:<br>...", inference_snippet, gr.Textbox(str(exc), visible=True)
 
598
  ov_quant_dataset_split,
599
  ov_quant_dataset_column_name,
600
  ov_quant_dataset_num_samples,
601
+ oauth_token: Optional[gr.OAuthToken] = None,
602
+ profile: Optional[gr.OAuthProfile] = None,
603
  ) -> str:
604
+ if oauth_token is None or profile is None:
605
+ return "", "", "", gr.Textbox("Please sign in with Hugging Face to use this Space", visible=True)
606
+
607
  if not model_id:
608
  return "", "", "", gr.Textbox("Please enter a model ID", visible=True)
609
 
610
  if output_model_id and "/" not in output_model_id:
611
+ output_model_id = f"{profile.username}/{output_model_id}"
 
 
 
612
 
613
  output_model_id = output_model_id if not create_pr else model_id
614