sanchit-gandhi committed on
Commit a869d63
1 Parent(s): 22f8323

qf2iwkac: saving weights and logs of step 0k

flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:48ae4ecf95a1c43c77baa282af96d82c8f123e860ab7354c6a1e9114fdd3ae58
+ oid sha256:75b5e8750334510038ed40defb28135d08603b14d0fa782ecae6770720ae7549
 size 218688
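Only the `oid sha256` field of the LFS pointer changes here; the `size` stays at 218688 bytes, consistent with overwriting the dummy checkpoint with new weights of identical shape. As a side note, both pointer fields can be recomputed from a local copy of the file; a minimal sketch (the filename is illustrative, not part of this commit):

```python
# Recompute the two git-lfs pointer fields (oid + size) for a local weights file.
import hashlib
import os

path = "flax_model.msgpack"  # illustrative local path

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

print("version https://git-lfs.github.com/spec/v1")
print(f"oid sha256:{digest.hexdigest()}")
print(f"size {os.path.getsize(path)}")
```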
wandb/run-20220529_120458-qf2iwkac/files/output.log CHANGED
@@ -688,3 +688,10 @@ Training...: 0%|
 
 Evaluating ...: 50%|██████████████████████████████████▌ | 2/4 [00:44<00:43, 21.55s/it]
 
+ Model weights saved in /home/sanchitgandhi/flax-dummy/flax_model.msgpack | 0/4 [01:09<?, ?it/s]
+ tokenizer config file saved in ./tokenizer_config.json
+ Special tokens file saved in ./special_tokens_map.json
+ Training...: 25%|██████████████████ | 1/4 [01:12<03:38, 72.83s/it]
+ Step... (6 | Loss: 6.891959190368652, Learning Rate: 2.9999937396496534e-06, Gradient Norm: 0.29065653681755066)
+ Step... (7 | Loss: 6.901648998260498, Learning Rate: 3.5999983083456755e-06, Gradient Norm: 0.30436068773269653)
+ Step... (5/10 | Eval Loss: 6.89687442779541 | Eval wer: 1.3195402298850574 | Eval cer: 1.4536741214057507 |): 67%|▋| 2/
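The three "saved" messages above are the kind emitted by `save_pretrained` in 🤗 Transformers, which writes the `flax_model.msgpack` weights alongside `tokenizer_config.json` and `special_tokens_map.json`. A minimal sketch of that save step, using a tiny randomly initialised Flax BART and a placeholder tokenizer rather than the actual dummy model from this run:

```python
# Sketch only: a tiny Flax model + tokenizer saved with save_pretrained,
# reproducing the kind of files/messages seen in output.log above.
from transformers import BartConfig, BartTokenizer, FlaxBartForConditionalGeneration

config = BartConfig(
    vocab_size=1024, d_model=16,
    encoder_layers=1, decoder_layers=1,
    encoder_attention_heads=2, decoder_attention_heads=2,
    encoder_ffn_dim=32, decoder_ffn_dim=32,
)
model = FlaxBartForConditionalGeneration(config)                 # randomly initialised dummy weights
tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")  # placeholder tokenizer

output_dir = "./flax-dummy"            # assumed output directory
model.save_pretrained(output_dir)      # -> flax_model.msgpack, config.json
tokenizer.save_pretrained(output_dir)  # -> tokenizer_config.json, special_tokens_map.json, vocab files
```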
wandb/run-20220529_120458-qf2iwkac/files/wandb-summary.json CHANGED
@@ -1 +1 @@
- {"train/decoder_grad_norm": 0.28948837518692017, "train/decoder_param_norm": 10.9873046875, "train/encoder_grad_norm": 6.325928552541882e-05, "train/encoder_param_norm": 21.972986221313477, "train/grad_norm": 0.28948840498924255, "layer_grad_norm/": {"decoder": {"model": {"decoder": {"embed_positions": {"embedding": 0.1468065232038498}, "embed_tokens": {"embedding": 0.24802552163600922}, "layernorm_embedding": {"bias": 0.006764871999621391, "scale": 0.0036919426638633013}, "layers": {"FlaxBartDecoderLayers": {"encoder_attn": {"k_proj": {"bias": 3.328560029599442e-13, "kernel": 9.95582847933274e-13}, "out_proj": {"bias": 0.009623706340789795, "kernel": 4.8367015551775694e-05}, "q_proj": {"bias": 7.838640760787774e-13, "kernel": 1.0103118428667068e-12}, "v_proj": {"bias": 0.00114067189861089, "kernel": 4.325820555095561e-06}}, "encoder_attn_layer_norm": {"bias": 0.00958589930087328, "scale": 0.005416943226009607}, "fc1": {"bias": 0.00019959310884587467, "kernel": 0.0004716127004940063}, "fc2": {"bias": 0.00993743073195219, "kernel": 0.0003652443701867014}, "final_layer_norm": {"bias": 0.010846015997231007, "scale": 0.005791571922600269}, "self_attn": {"k_proj": {"bias": 1.3708134627421487e-09, "kernel": 4.577025265461998e-06}, "out_proj": {"bias": 0.009274601936340332, "kernel": 0.000999519252218306}, "q_proj": {"bias": 1.7297755903200596e-06, "kernel": 4.314425950724399e-06}, "v_proj": {"bias": 0.0008124791202135384, "kernel": 0.0009584471699781716}}, "self_attn_layer_norm": {"bias": 0.009586097672581673, "scale": 0.005415966734290123}}}}}}, "encoder": {"adapter": {"layers": {"0": {"conv": {"bias": 2.977989197461284e-07, "kernel": 1.3835579011356458e-06}}, "1": {"conv": {"bias": 3.6216874832462054e-06, "kernel": 1.5065093066368718e-06}}, "2": {"conv": {"bias": 6.303114059846848e-05, "kernel": 2.8466049570852192e-06}}}}, "encoder": {"layer_norm": {"bias": 4.9153292991377384e-08, "scale": 4.927479935190604e-08}, "layers": {"FlaxWav2Vec2EncoderLayers": {"attention": {"k_proj": {"bias": 1.459917082888187e-15, "kernel": 1.90472235206407e-10}, "out_proj": {"bias": 1.0287843679179787e-06, "kernel": 1.619392975271694e-07}, "q_proj": {"bias": 8.26251983498949e-11, "kernel": 2.237984936259707e-10}, "v_proj": {"bias": 7.108157973334528e-08, "kernel": 1.6080137754670432e-07}}, "feed_forward": {"intermediate_dense": {"bias": 4.810428322343796e-08, "kernel": 1.3616384819670202e-07}, "output_dense": {"bias": 9.441144470656582e-07, "kernel": 1.3445610136386676e-07}}, "final_layer_norm": {"bias": 3.294326367253575e-09, "scale": 2.245429175928848e-09}, "layer_norm": {"bias": 5.97741944829977e-09, "scale": 3.64200114510993e-09}}}, "pos_conv_embed": {"conv": {"bias": 3.8169284266587056e-07, "weight_g": 7.756315589801943e-09, "weight_v": 8.743172230651908e-08}}}, "feature_extractor": {"conv_layers": {"0": {"conv": {"kernel": 0.0}, "layer_norm": {"bias": 0.0, "scale": 0.0}}, "1": {"conv": {"kernel": 0.0}, "layer_norm": {"bias": 0.0, "scale": 0.0}}, "2": {"conv": {"kernel": 0.0}, "layer_norm": {"bias": 0.0, "scale": 0.0}}}}, "feature_projection": {"layer_norm": {"bias": 8.516661864632624e-08, "scale": 9.493159147666574e-09}, "projection": {"bias": 8.456348723484552e-07, "kernel": 6.272103405535745e-07}}, "masked_spec_embed": 0.0}}, "layer_param_norm/": {"decoder": {"model": {"decoder": {"embed_positions": {"embedding": 0.7995925545692444}, "embed_tokens": {"embedding": 2.5083723068237305}, "layernorm_embedding": {"bias": 0.004505096934735775, "scale": 4.000091552734375}, "layers": {"FlaxBartDecoderLayers": 
{"encoder_attn": {"k_proj": {"bias": 2.66487765188117e-09, "kernel": 0.43237069249153137}, "out_proj": {"bias": 0.006343331653624773, "kernel": 0.46723419427871704}, "q_proj": {"bias": 3.573997986450195e-08, "kernel": 0.4763868451118469}, "v_proj": {"bias": 0.0068108439445495605, "kernel": 0.4630829691886902}}, "encoder_attn_layer_norm": {"bias": 0.006466111168265343, "scale": 5.656971454620361}, "fc1": {"bias": 0.0032308774534612894, "kernel": 0.2302577644586563}, "fc2": {"bias": 0.00635903887450695, "kernel": 0.228498175740242}, "final_layer_norm": {"bias": 0.00622177729383111, "scale": 5.6571831703186035}, "self_attn": {"k_proj": {"bias": 1.3586953173216898e-05, "kernel": 0.4489402174949646}, "out_proj": {"bias": 0.006346424575895071, "kernel": 0.4596422016620636}, "q_proj": {"bias": 0.005505683831870556, "kernel": 0.43902429938316345}, "v_proj": {"bias": 0.0061093042604625225, "kernel": 0.47398924827575684}}, "self_attn_layer_norm": {"bias": 0.006466289050877094, "scale": 5.6569719314575195}}}}}}, "encoder": {"adapter": {"layers": {"0": {"conv": {"bias": 0.003881996963173151, "kernel": 0.7980067133903503}}, "1": {"conv": {"bias": 0.004839896224439144, "kernel": 0.7824534177780151}}, "2": {"conv": {"bias": 0.0047243074513971806, "kernel": 0.793502151966095}}}}, "encoder": {"layer_norm": {"bias": 0.0014691534452140331, "scale": 4.000521659851074}, "layers": {"FlaxWav2Vec2EncoderLayers": {"attention": {"k_proj": {"bias": 1.1641312219756728e-10, "kernel": 0.6511669158935547}, "out_proj": {"bias": 0.004517045803368092, "kernel": 0.6515875458717346}, "q_proj": {"bias": 9.10918606678024e-06, "kernel": 0.636457085609436}, "v_proj": {"bias": 0.00288462289609015, "kernel": 0.6421586871147156}}, "feed_forward": {"intermediate_dense": {"bias": 0.0023183180019259453, "kernel": 0.7063446640968323}, "output_dense": {"bias": 0.004766375757753849, "kernel": 0.7153233885765076}}, "final_layer_norm": {"bias": 0.00032824758091010153, "scale": 7.999977111816406}, "layer_norm": {"bias": 0.0006391080096364021, "scale": 8.000051498413086}}}, "pos_conv_embed": {"conv": {"bias": 0.0019932836294174194, "weight_g": 2.2777116298675537, "weight_v": 2.2775819301605225}}}, "feature_extractor": {"conv_layers": {"0": {"conv": {"kernel": 7.867799758911133}, "layer_norm": {"bias": 0.0, "scale": 5.656854152679443}}, "1": {"conv": {"kernel": 8.025212287902832}, "layer_norm": {"bias": 0.0, "scale": 5.656854152679443}}, "2": {"conv": {"kernel": 7.975250720977783}, "layer_norm": {"bias": 0.0, "scale": 5.656854152679443}}}}, "feature_projection": {"layer_norm": {"bias": 0.0016454723663628101, "scale": 5.656803131103516}, "projection": {"bias": 0.0019700622651726007, "kernel": 0.4300574064254761}}, "masked_spec_embed": 2.404470205307007}}, "train/learning_rate": 1.7999846022576094e-06, "train/loss": 6.901595592498779, "train/param_norm": 24.56690788269043, "_timestamp": 1653825986, "_runtime": 88, "_step": 4}
 
+ {"train/decoder_grad_norm": 0.293959379196167, "train/decoder_param_norm": 10.987303733825684, "train/encoder_grad_norm": 5.3447409300133586e-05, "train/encoder_param_norm": 21.972993850708008, "train/grad_norm": 0.293959379196167, "layer_grad_norm/": {"decoder": {"model": {"decoder": {"embed_positions": {"embedding": 0.15045838057994843}, "embed_tokens": {"embedding": 0.25100693106651306}, "layernorm_embedding": {"bias": 0.006534375250339508, "scale": 0.0037290260661393404}, "layers": {"FlaxBartDecoderLayers": {"encoder_attn": {"k_proj": {"bias": 3.375024326852938e-13, "kernel": 9.308541350921962e-13}, "out_proj": {"bias": 0.01009051688015461, "kernel": 5.131477155373432e-05}, "q_proj": {"bias": 6.986853044906038e-13, "kernel": 1.1284255864787984e-12}, "v_proj": {"bias": 0.0011288232635706663, "kernel": 4.451439963304438e-06}}, "encoder_attn_layer_norm": {"bias": 0.010046548210084438, "scale": 0.005095764063298702}, "fc1": {"bias": 0.00023096689255908132, "kernel": 0.0004343906184658408}, "fc2": {"bias": 0.010574166662991047, "kernel": 0.0004366966022644192}, "final_layer_norm": {"bias": 0.010372617281973362, "scale": 0.005281996447592974}, "self_attn": {"k_proj": {"bias": 1.130181281894238e-09, "kernel": 4.527865712589119e-06}, "out_proj": {"bias": 0.010307326912879944, "kernel": 0.0009190444834530354}, "q_proj": {"bias": 1.58041484610294e-06, "kernel": 4.583661393553484e-06}, "v_proj": {"bias": 0.0008930374169722199, "kernel": 0.0008962888969108462}}, "self_attn_layer_norm": {"bias": 0.010049148462712765, "scale": 0.005089657846838236}}}}}}, "encoder": {"adapter": {"layers": {"0": {"conv": {"bias": 2.78683756960163e-07, "kernel": 1.3058978538538213e-06}}, "1": {"conv": {"bias": 3.2350201308872784e-06, "kernel": 1.4072030580791761e-06}}, "2": {"conv": {"bias": 5.3230603953124955e-05, "kernel": 2.5485987862339243e-06}}}}, "encoder": {"layer_norm": {"bias": 4.616065041318507e-08, "scale": 4.433106681744903e-08}, "layers": {"FlaxWav2Vec2EncoderLayers": {"attention": {"k_proj": {"bias": 1.3622831949482096e-15, "kernel": 1.7724854917133825e-10}, "out_proj": {"bias": 8.77685579325771e-07, "kernel": 1.5102111206033442e-07}, "q_proj": {"bias": 6.979490596581428e-11, "kernel": 2.1076571043998626e-10}, "v_proj": {"bias": 5.5353790884282716e-08, "kernel": 1.2770148316576524e-07}}, "feed_forward": {"intermediate_dense": {"bias": 3.7398876884253696e-08, "kernel": 1.1362677554416223e-07}, "output_dense": {"bias": 8.160148468050465e-07, "kernel": 1.2486131595323968e-07}}, "final_layer_norm": {"bias": 2.575513136093832e-09, "scale": 1.88160664826853e-09}, "layer_norm": {"bias": 4.1193279898266155e-09, "scale": 2.5369788492213274e-09}}}, "pos_conv_embed": {"conv": {"bias": 3.465826523552096e-07, "weight_g": 7.019787418016676e-09, "weight_v": 7.727043538352518e-08}}}, "feature_extractor": {"conv_layers": {"0": {"conv": {"kernel": 0.0}, "layer_norm": {"bias": 0.0, "scale": 0.0}}, "1": {"conv": {"kernel": 0.0}, "layer_norm": {"bias": 0.0, "scale": 0.0}}, "2": {"conv": {"kernel": 0.0}, "layer_norm": {"bias": 0.0, "scale": 0.0}}}}, "feature_projection": {"layer_norm": {"bias": 6.646207850735664e-08, "scale": 8.013022245734192e-09}, "projection": {"bias": 6.936390377632051e-07, "kernel": 5.300959742271516e-07}}, "masked_spec_embed": 0.0}}, "layer_param_norm/": {"decoder": {"model": {"decoder": {"embed_positions": {"embedding": 0.7995960116386414}, "embed_tokens": {"embedding": 2.5083138942718506}, "layernorm_embedding": {"bias": 0.004562107380479574, "scale": 4.0000901222229}, "layers": 
{"FlaxBartDecoderLayers": {"encoder_attn": {"k_proj": {"bias": 2.7063116192493908e-09, "kernel": 0.43237069249153137}, "out_proj": {"bias": 0.006423539947718382, "kernel": 0.46732479333877563}, "q_proj": {"bias": 3.698390571571508e-08, "kernel": 0.4763868451118469}, "v_proj": {"bias": 0.006897295359522104, "kernel": 0.463136225938797}}, "encoder_attn_layer_norm": {"bias": 0.006547156255692244, "scale": 5.656968116760254}, "fc1": {"bias": 0.003273726673796773, "kernel": 0.23024976253509521}, "fc2": {"bias": 0.006439621560275555, "kernel": 0.22848989069461823}, "final_layer_norm": {"bias": 0.006299057509750128, "scale": 5.657195568084717}, "self_attn": {"k_proj": {"bias": 1.3696276255359408e-05, "kernel": 0.4489610493183136}, "out_proj": {"bias": 0.006426077801734209, "kernel": 0.4596632421016693}, "q_proj": {"bias": 0.005574330221861601, "kernel": 0.4390396177768707}, "v_proj": {"bias": 0.006184465251863003, "kernel": 0.4740023910999298}}, "self_attn_layer_norm": {"bias": 0.00654734019190073, "scale": 5.6569695472717285}}}}}}, "encoder": {"adapter": {"layers": {"0": {"conv": {"bias": 0.003938107285648584, "kernel": 0.7980291247367859}}, "1": {"conv": {"bias": 0.004904463887214661, "kernel": 0.7824782729148865}}, "2": {"conv": {"bias": 0.0047929175198078156, "kernel": 0.7935279607772827}}}}, "encoder": {"layer_norm": {"bias": 0.0015007174806669354, "scale": 4.000539302825928}, "layers": {"FlaxWav2Vec2EncoderLayers": {"attention": {"k_proj": {"bias": 1.1623019213757857e-10, "kernel": 0.6511669158935547}, "out_proj": {"bias": 0.004577825777232647, "kernel": 0.6515982747077942}, "q_proj": {"bias": 9.149670404440258e-06, "kernel": 0.636457085609436}, "v_proj": {"bias": 0.0029153258074074984, "kernel": 0.6421692371368408}}, "feed_forward": {"intermediate_dense": {"bias": 0.0023425323888659477, "kernel": 0.7063462138175964}, "output_dense": {"bias": 0.004827361553907394, "kernel": 0.7153245210647583}}, "final_layer_norm": {"bias": 0.00033075266401283443, "scale": 7.999977111816406}, "layer_norm": {"bias": 0.0006435627001337707, "scale": 8.000053405761719}}}, "pos_conv_embed": {"conv": {"bias": 0.002023016568273306, "weight_g": 2.277714252471924, "weight_v": 2.2775814533233643}}}, "feature_extractor": {"conv_layers": {"0": {"conv": {"kernel": 7.867799758911133}, "layer_norm": {"bias": 0.0, "scale": 5.656854152679443}}, "1": {"conv": {"kernel": 8.025212287902832}, "layer_norm": {"bias": 0.0, "scale": 5.656854152679443}}, "2": {"conv": {"kernel": 7.975250720977783}, "layer_norm": {"bias": 0.0, "scale": 5.656854152679443}}}}, "feature_projection": {"layer_norm": {"bias": 0.0016678336542099714, "scale": 5.656801223754883}, "projection": {"bias": 0.0020024373661726713, "kernel": 0.4300427734851837}}, "masked_spec_embed": 2.404470205307007}}, "train/learning_rate": 4.799978341907263e-06, "train/loss": 6.903526782989502, "train/param_norm": 24.56691551208496, "_timestamp": 1653826062, "_runtime": 164, "_step": 9, "eval/loss": 6.89687442779541, "eval/wer": 1.3195402298850574, "eval/cer": 1.4536741214057507, "eval/step_0k": {"_type": "table-file", "path": "media/table/eval/step_0k_5_92ae240f2472ae79ad31.table.json", "sha256": "92ae240f2472ae79ad316b729700c013d11e821782ca7e4cd33eb61c840df66d", "size": 11870, "artifact_path": "wandb-client-artifact://e0ee9vqfa2o0de6mdryraqvdeo75406tm7340121ydcg07o2xduuis84iob81362njk07nbiunrvc6uhqqk5g7unxjij98oid247vn20rsiy8b2ehvb5ktv1wqg5jt3u:latest/eval/step_0k.table.json", "_latest_artifact_path": 
"wandb-client-artifact://e0ee9vqfa2o0de6mdryraqvdeo75406tm7340121ydcg07o2xduuis84iob81362njk07nbiunrvc6uhqqk5g7unxjij98oid247vn20rsiy8b2ehvb5ktv1wqg5jt3u:latest/eval/step_0k.table.json", "ncols": 3, "nrows": 50}, "eval/step_0k_incorrect": {"_type": "table-file", "path": "media/table/eval/step_0k_incorrect_5_b30e6671cbaaf5c82b6d.table.json", "sha256": "b30e6671cbaaf5c82b6d15ffbbb5d23d008017499213a95041da6a0e38000fb4", "size": 15179, "artifact_path": "wandb-client-artifact://1g1krxry75qdj2cxj79rep1n2ssa02jrnmc0ylbhmvk2i0qh1p5m5x4y3uli6xtfvwb3m4506hobe5ciztd1wcm2mcv3ebe7bxonu1utd77sa1syhkwywg76ogf8h0j7:latest/eval/step_0k_incorrect.table.json", "_latest_artifact_path": "wandb-client-artifact://1g1krxry75qdj2cxj79rep1n2ssa02jrnmc0ylbhmvk2i0qh1p5m5x4y3uli6xtfvwb3m4506hobe5ciztd1wcm2mcv3ebe7bxonu1utd77sa1syhkwywg76ogf8h0j7:latest/eval/step_0k_incorrect.table.json", "ncols": 3, "nrows": 64}}
wandb/run-20220529_120458-qf2iwkac/logs/debug-internal.log CHANGED
@@ -106,3 +106,40 @@
 2022-05-29 12:07:36,995 DEBUG SenderThread:1721989 [sender.py:send():235] send: files
 2022-05-29 12:07:36,995 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file media/table/eval/step_0k_5_92ae240f2472ae79ad31.table.json with policy now
 2022-05-29 12:07:36,996 DEBUG SenderThread:1721989 [sender.py:send_request():249] send_request: log_artifact
+ 2022-05-29 12:07:37,260 INFO Thread-14 :1721989 [upload_job.py:push():137] Uploaded file /tmp/tmpl7u6xxw8wandb/1pl7w3az-media/table/eval/step_0k_5_92ae240f2472ae79ad31.table.json
+ 2022-05-29 12:07:37,399 INFO Thread-16 :1721989 [upload_job.py:push():95] Uploaded file /home/sanchitgandhi/.cache/wandb/artifacts/obj/md5/61/78965daa44518dd50bc0e765d67f24
+ 2022-05-29 12:07:37,903 INFO SenderThread:1721989 [sender.py:send_request_log_artifact():976] logged artifact run-qf2iwkac-evalstep_0k_incorrect - {'id': 'QXJ0aWZhY3Q6MTM1NjQyNTkx', 'digest': 'cd720fb526739060a418c512f0e14c5f', 'state': 'PENDING', 'aliases': [], 'artifactSequence': {'id': 'QXJ0aWZhY3RDb2xsZWN0aW9uOjIxNTAwMTY4', 'latestArtifact': None}, 'version': 'latest'}
+ 2022-05-29 12:07:37,903 DEBUG SenderThread:1721989 [sender.py:send():235] send: files
+ 2022-05-29 12:07:37,903 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file media/table/eval/step_0k_incorrect_5_b30e6671cbaaf5c82b6d.table.json with policy now
+ 2022-05-29 12:07:38,134 INFO Thread-17 :1721989 [upload_job.py:push():137] Uploaded file /tmp/tmpl7u6xxw8wandb/26g70uwz-media/table/eval/step_0k_incorrect_5_b30e6671cbaaf5c82b6d.table.json
+ 2022-05-29 12:07:38,530 INFO Thread-7 :1721989 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchitgandhi/flax-dummy/wandb/run-20220529_120458-qf2iwkac/files/output.log
+ 2022-05-29 12:07:40,346 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:40,349 DEBUG SenderThread:1721989 [sender.py:send():235] send: history
+ 2022-05-29 12:07:40,349 DEBUG SenderThread:1721989 [sender.py:send():235] send: summary
+ 2022-05-29 12:07:40,350 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file wandb-summary.json with policy end
+ 2022-05-29 12:07:40,392 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:40,531 INFO Thread-7 :1721989 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchitgandhi/flax-dummy/wandb/run-20220529_120458-qf2iwkac/files/wandb-summary.json
+ 2022-05-29 12:07:40,531 INFO Thread-7 :1721989 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchitgandhi/flax-dummy/wandb/run-20220529_120458-qf2iwkac/files/output.log
+ 2022-05-29 12:07:40,896 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:40,899 DEBUG SenderThread:1721989 [sender.py:send():235] send: history
+ 2022-05-29 12:07:40,899 DEBUG SenderThread:1721989 [sender.py:send():235] send: summary
+ 2022-05-29 12:07:40,900 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file wandb-summary.json with policy end
+ 2022-05-29 12:07:40,946 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:41,532 INFO Thread-7 :1721989 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchitgandhi/flax-dummy/wandb/run-20220529_120458-qf2iwkac/files/wandb-summary.json
+ 2022-05-29 12:07:41,543 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:41,545 DEBUG SenderThread:1721989 [sender.py:send():235] send: history
+ 2022-05-29 12:07:41,546 DEBUG SenderThread:1721989 [sender.py:send():235] send: summary
+ 2022-05-29 12:07:41,547 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file wandb-summary.json with policy end
+ 2022-05-29 12:07:41,579 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:42,158 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:42,160 DEBUG SenderThread:1721989 [sender.py:send():235] send: history
+ 2022-05-29 12:07:42,161 DEBUG SenderThread:1721989 [sender.py:send():235] send: summary
+ 2022-05-29 12:07:42,162 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file wandb-summary.json with policy end
+ 2022-05-29 12:07:42,200 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:42,532 INFO Thread-7 :1721989 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchitgandhi/flax-dummy/wandb/run-20220529_120458-qf2iwkac/files/wandb-summary.json
+ 2022-05-29 12:07:42,532 INFO Thread-7 :1721989 [dir_watcher.py:_on_file_modified():230] file/dir modified: /home/sanchitgandhi/flax-dummy/wandb/run-20220529_120458-qf2iwkac/files/output.log
+ 2022-05-29 12:07:42,826 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
+ 2022-05-29 12:07:42,828 DEBUG SenderThread:1721989 [sender.py:send():235] send: history
+ 2022-05-29 12:07:42,828 DEBUG SenderThread:1721989 [sender.py:send():235] send: summary
+ 2022-05-29 12:07:42,829 INFO SenderThread:1721989 [sender.py:_save_file():947] saving file wandb-summary.json with policy end
+ 2022-05-29 12:07:42,857 DEBUG HandlerThread:1721989 [handler.py:handle_request():141] handle_request: partial_history
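The new debug entries track the upload of the `eval/step_0k` and `eval/step_0k_incorrect` tables (the `media/table/eval/step_0k_*.table.json` artifacts also referenced in wandb-summary.json). A minimal sketch of how such eval tables are typically logged; the column names and rows are assumptions, since the diff only shows that the real tables have 3 columns:

```python
# Illustrative only: logging eval samples as W&B tables, which is what creates
# media/table/eval/*.table.json files and the artifact uploads in the log above.
import wandb

wandb.init(project="flax-dummy", mode="offline")  # offline keeps the snippet self-contained

rows = [
    ["id_0", "a reference transcription", "a predicted transcription"],
    ["id_1", "another reference", "another prediction"],
]
table = wandb.Table(columns=["id", "reference", "prediction"], data=rows)
wandb.log({"eval/step_0k": table})
```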
wandb/run-20220529_120458-qf2iwkac/run-qf2iwkac.wandb CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:41e927d4ed171d350275f4ae49605fcc773813f7805542baaaf99fa5fcebe420
- size 52033
+ oid sha256:343d5b5cb7c2d56f5ad74b6d5a9944575de7d474ed9807b10cfe1607f97921c1
+ size 89472