nielsr HF staff committed on
Commit
96b9145
1 Parent(s): da2b143

Add upload_file

Browse files
Files changed (1) hide show
  1. modeling_cogvlm.py +32 -22
modeling_cogvlm.py CHANGED
@@ -440,29 +440,29 @@ class CogVLMModel(CogVLMPreTrainedModel):
440
 
441
  from huggingface_hub import HfApi
442
 
443
- torch.save(images_features, "images_features.pt")
444
- torch.save(inputs_embeds, "inputs_embeds.pt")
445
- torch.save(token_type_ids, "token_type_ids.pt")
446
 
447
- api = HfApi()
448
- api.upload_file(
449
- path_or_fileobj="images_features.pt",
450
- path_in_repo="images_features.pt",
451
- repo_id="nielsr/test-cogvlm",
452
- repo_type="dataset",
453
- )
454
- api.upload_file(
455
- path_or_fileobj="inputs_embeds.pt",
456
- path_in_repo="inputs_embeds.pt",
457
- repo_id="nielsr/test-cogvlm",
458
- repo_type="dataset",
459
- )
460
- api.upload_file(
461
- path_or_fileobj="token_type_ids.pt",
462
- path_in_repo="token_type_ids.pt",
463
- repo_id="nielsr/test-cogvlm",
464
- repo_type="dataset",
465
- )
466
 
467
  print("First values of text embeddings:", inputs_embeds[0, :3, :3])
468
  print("First values of images_features:", images_features[0, :3])
@@ -557,6 +557,16 @@ class CogVLMModel(CogVLMPreTrainedModel):
557
 
558
  hidden_states = inputs_embeds
559
 
 
 
 
 
 
 
 
 
 
 
560
  # decoder layers
561
  all_hidden_states = () if output_hidden_states else None
562
  all_self_attns = () if output_attentions else None
 
440
 
441
  from huggingface_hub import HfApi
442
 
443
+ # torch.save(images_features, "images_features.pt")
444
+ # torch.save(inputs_embeds, "inputs_embeds.pt")
445
+ # torch.save(token_type_ids, "token_type_ids.pt")
446
 
447
+ # api = HfApi()
448
+ # api.upload_file(
449
+ # path_or_fileobj="images_features.pt",
450
+ # path_in_repo="images_features.pt",
451
+ # repo_id="nielsr/test-cogvlm",
452
+ # repo_type="dataset",
453
+ # )
454
+ # api.upload_file(
455
+ # path_or_fileobj="inputs_embeds.pt",
456
+ # path_in_repo="inputs_embeds.pt",
457
+ # repo_id="nielsr/test-cogvlm",
458
+ # repo_type="dataset",
459
+ # )
460
+ # api.upload_file(
461
+ # path_or_fileobj="token_type_ids.pt",
462
+ # path_in_repo="token_type_ids.pt",
463
+ # repo_id="nielsr/test-cogvlm",
464
+ # repo_type="dataset",
465
+ # )
466
 
467
  print("First values of text embeddings:", inputs_embeds[0, :3, :3])
468
  print("First values of images_features:", images_features[0, :3])
 
557
 
558
  hidden_states = inputs_embeds
559
 
560
+ torch.save(hidden_states, "inputs_embeds.pt")
561
+
562
+ api = HfApi()
563
+ api.upload_file(
564
+ path_or_fileobj="inputs_embeds.pt",
565
+ path_in_repo="inputs_embeds.pt",
566
+ repo_id="nielsr/test-cogvlm",
567
+ repo_type="dataset",
568
+ )
569
+
570
  # decoder layers
571
  all_hidden_states = () if output_hidden_states else None
572
  all_self_attns = () if output_attentions else None