pere committed on
Commit
40f0990
·
2 Parent(s): 451d251424130b
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "./",
3
  "architectures": [
4
  "XLMRobertaForMaskedLM"
5
  ],
 
1
  {
2
+ "_name_or_path": "./config.json",
3
  "architectures": [
4
  "XLMRobertaForMaskedLM"
5
  ],
events.out.tfevents.1672827687.t1v-n-0853dee6-w-3.589505.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87dd3c9ea4eac2a9c6beb64231d3b0de4e7b939d4dbec02a915dc686db38f2f4
3
+ size 7637
events.out.tfevents.1672828479.t1v-n-0853dee6-w-3.619001.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee72f312c9252307db91a3f04eb21f52639129fce0961f1249406dbb4cf2e115
3
+ size 7637
events.out.tfevents.1672829472.t1v-n-0853dee6-w-3.648641.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ff003de3ea0b25fa9b42f409d06a20858e98feb1ce71825aa81e9bf1947cef0
3
+ size 7637
events.out.tfevents.1672831040.t1v-n-0853dee6-w-3.679014.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b382e49a68b3676d58ad0c808b19b1ee38d13cc19d6b17f13bb65babe47d11ff
3
+ size 7637
events.out.tfevents.1672834221.t1v-n-0853dee6-w-3.710898.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8216431c300b3c4fbb9dbac7a6bea7a6b27f62da5d35768b21ee35559aeaf06f
3
+ size 7637
events.out.tfevents.1672834909.t1v-n-0853dee6-w-3.740708.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa375387021b8ed7a3b3fd7ca6197ff9cba91079608f8279052917b0fca28325
3
+ size 7637
events.out.tfevents.1672836931.t1v-n-0853dee6-w-3.772310.0.v2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32b18a6cab500901c772edc9fd9612811060039bcad5786b3a4d217ea6820945
3
+ size 7637
run_mlm_flax_stream.py CHANGED
@@ -659,11 +659,11 @@ if __name__ == "__main__":
659
  model_flax = FlaxAutoModelForMaskedLM.from_pretrained("./")
660
 
661
  model_pt.push_to_hub(training_args.hub_model_id,commit_message=f"Weights for torch of step {step+1}")
662
- model_flax
 
663
  #Delete the models to free memory
664
  del(model_pt)
665
- del(model_flax).push_to_hub(traini
666
- ng_args.hub_model_id,commit_message=f"Weights for flax of step {step+1}")
667
 
668
  print(f"Saving weights and logs of step {step+1}. \nThe result is saved to {training_args.output_dir} by worker {jax.process_index()}.")
669
 
 
659
  model_flax = FlaxAutoModelForMaskedLM.from_pretrained("./")
660
 
661
  model_pt.push_to_hub(training_args.hub_model_id,commit_message=f"Weights for torch of step {step+1}")
662
+ model_flax.push_to_hub(training_args.hub_model_id,commit_message=f"Weights for flax of step {step+1}")
663
+
664
  #Delete the models to free memory
665
  del(model_pt)
666
+ del(model_flax)
 
667
 
668
  print(f"Saving weights and logs of step {step+1}. \nThe result is saved to {training_args.output_dir} by worker {jax.process_index()}.")
669