peterdavidfagan committed on
Commit b507d57
1 Parent(s): 79c40a7

Upload folder using huggingface_hub (#1)


- decdc90770b1adc1bc439a7b74c63a91fdf79cadbb3091abd8237c9b48f30053 (a438342c609da3aa70384d4bc06ecb466f27b103)
- 629223f91073e498df666b138ecccd9a1d76a74778857f4f310c06b0987b2910 (b1e4b89e5e17f80b0735c8070575f9b20af30812)
- 7fae11f440b7adff3e35de256a0b723b2d83bd120f11b58068f55cd05e3d8278 (74db67e2cbc1c2a97c5d8f63346115330cd17eea)
- b4b5c69298ff0fe3f8bf5f4b5968d2ac1e046f2afb3232daa96777ccf4a15488 (d8e8634ae43d99bcb1d35fdddedfd23c642d059b)
- caa3832e76ecb2e3120e3271f7b8214847858bf8a14c179b847fff5a97c42457 (d3eaa41fcf926d3d5f8f62b372276d0b02ca829d)
- b66a9a542c3be7a077995c62d2e5b1c180a88c7b97c47a8f0dfa7701cf8026d2 (c095a6d16e9464691c6e4573a173fd6f2e5f6f2e)
- a01a3d7f8789b74599c0e5fedb097a0f02d7b804472c98bee57a93872f400710 (01b3aeb6e6205faecb6ae080cad48d270a665788)
- 884b5e3c328bacce2a39b7142171b8806676ffd6842d224c1d3419cfad86d6be (840b584b0336cf8a50400e9f8480abc507fc1810)
- 799766e61f3559a807431c5332acb9eeeacd8989ed48fb797f55d4549ee8b97a (bf80070694dca185c25dc2af582ae67e99c562ba)
- e2446e8ccb2230cfd6b4924f8517cd21cb960e8f1260394a419517033c01b18e (e4cc082db4f027fd7ddd788c7e37fe9d309adc46)
- 0536a96524e960aed16194820f780d174fd400216e391a1b9a39ce87bb155aba (c644a3bc2a395cfff763dbc8a20286ecbee93eee)
- 661611d9061d7f18c14311846fd027b69d918b432e4309cf60e7197b93002d60 (9ba420d723a87744c2996998dcb6bfd394a3711d)
- 495f91f5dcfaf47d2e3544308ed4a2a7c59d1230e1ae532d6a6952a060ae2dea (1d73f3d0c48b792160bbf7184ff4c8d8fdd0a448)
- 58f77f04bc2e842eaa3b3e0d08749bf74e6c504ee6c01ab062c0995751c7147f (57bbcef243511c8bc5bc031f1a56579e2d0f3d02)
- 89772979a09c19c7c3b367a0d9f2dab710811a1d90e1a497577c1fb7a9c0b655 (191afc5fd904b79599ccb10fa543d91712e3c0aa)
- 71cf9632de6be65606aee221108619f1e97a475a33793f164e4753b3a59bdcb4 (342aca30a36267cf99ada2d7a8cc599fa065e889)
- 37624c845a40ccfce18390558f0e4e5831140742b57e91bfa66316921d245750 (46088490ae137f5a540c32f6e3517c904b83463f)
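
The commit message above indicates the folder was pushed with the huggingface_hub library's upload_folder API. A minimal Python sketch of such an upload follows; the repository id, local path, and options are illustrative assumptions, not values recorded in this commit.

    from huggingface_hub import upload_folder

    # Illustrative values only: the target repo id and the local folder path are
    # not recorded in this diff.
    upload_folder(
        folder_path="./checkpoint_export",       # parent folder holding the default/ subtree shown below
        repo_id="peterdavidfagan/<model-repo>",  # hypothetical repository id
        repo_type="model",
        commit_message="Upload folder using huggingface_hub",
        create_pr=True,                          # the "(#1)" suffix suggests the upload landed via a pull request
    )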

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +94 -0
  2. default/checkpoint +1 -0
  3. default/metrics._reduction_counter.value/.zarray +1 -0
  4. default/metrics._reduction_counter.value/0 +0 -0
  5. default/metrics.loss.count/.zarray +1 -0
  6. default/metrics.loss.count/0 +0 -0
  7. default/metrics.loss.total/.zarray +1 -0
  8. default/metrics.loss.total/0 +0 -0
  9. default/opt_state.1.0.count/.zarray +1 -0
  10. default/opt_state.1.0.count/0 +0 -0
  11. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/.zarray +1 -0
  12. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/0.0 +0 -0
  13. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/.zarray +1 -0
  14. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/0.0 +0 -0
  15. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/.zarray +1 -0
  16. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/0.0 +0 -0
  17. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/.zarray +1 -0
  18. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/0.0 +0 -0
  19. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/.zarray +1 -0
  20. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/0.0 +0 -0
  21. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/.zarray +1 -0
  22. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 +3 -0
  23. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/.zarray +1 -0
  24. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/0.0 +0 -0
  25. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/.zarray +1 -0
  26. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 +3 -0
  27. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/.zarray +1 -0
  28. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/0.0.0 +0 -0
  29. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/.zarray +1 -0
  30. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 +3 -0
  31. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/.zarray +1 -0
  32. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/0.0 +0 -0
  33. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/.zarray +1 -0
  34. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 +3 -0
  35. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/.zarray +1 -0
  36. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/0.0.0 +0 -0
  37. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/.zarray +1 -0
  38. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 +3 -0
  39. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/.zarray +1 -0
  40. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/0.0.0 +0 -0
  41. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/.zarray +1 -0
  42. default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 +3 -0
  43. default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/.zarray +1 -0
  44. default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/0.0.0 +0 -0
  45. default/opt_state.1.0.mu.continuous_action_head.Dense_0.bias/.zarray +1 -0
  46. default/opt_state.1.0.mu.continuous_action_head.Dense_0.bias/0 +0 -0
  47. default/opt_state.1.0.mu.continuous_action_head.Dense_0.kernel/.zarray +1 -0
  48. default/opt_state.1.0.mu.continuous_action_head.Dense_0.kernel/0.0 +0 -0
  49. default/opt_state.1.0.mu.image_encoder.embedding_function.Conv_0.bias/.zarray +1 -0
  50. default/opt_state.1.0.mu.image_encoder.embedding_function.Conv_0.bias/0 +0 -0
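
Each per-parameter directory listed above holds a chunked array on disk: a .zarray metadata file plus one or more chunk files (0, 0.0, 0.0.0). Assuming these are ordinary Zarr v2 stores, which is what that layout suggests, and that the Git LFS objects have been fetched, an individual array can be inspected with the zarr Python package. The path below is one entry from the list; treating it as a plain Zarr store is an assumption.

    import zarr

    # Assumes the repository is cloned locally with LFS objects pulled, and that
    # each parameter directory (".zarray" + chunk files) is a plain Zarr v2 store.
    arr = zarr.open(
        "default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel",
        mode="r",
    )
    print(arr.shape, arr.dtype)  # shape and dtype of this optimizer-state leaf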
.gitattributes CHANGED
@@ -33,3 +33,97 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.mu.image_encoder.embedding_function.Dense_0.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/opt_state.1.0.nu.image_encoder.embedding_function.Dense_0.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.image_encoder.embedding_function.Dense_0.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.0.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.0.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.0.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.0.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.0.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.0.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.1.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.1.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.1.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.1.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.1.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.1.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.10.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.10.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.10.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.10.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.10.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.10.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.11.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.11.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.11.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.11.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.11.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.11.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.2.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.2.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.2.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.2.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.2.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.2.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.3.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.3.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.3.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.3.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.3.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.3.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.4.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.4.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.4.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.4.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.4.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.4.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.5.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.5.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.5.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.5.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.5.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.5.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.6.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.6.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.6.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.6.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.6.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.6.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.7.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.7.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.7.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.7.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.7.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.7.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.8.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.8.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.k.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.o.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.q.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.v.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.9.layer.1.DenseReluDense.wi.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.encoder.block.9.layer.1.DenseReluDense.wo.kernel/0.0 filter=lfs diff=lfs merge=lfs -text
+ default/params.text_encoder.model.shared.embedding/0.0 filter=lfs diff=lfs merge=lfs -text
default/checkpoint ADDED
@@ -0,0 +1 @@
 
 
+ [Binary content not shown: a serialized manifest that maps every leaf of the checkpoint tree (train-step entries, metrics counters, the optimizer state count/mu/nu, and params covering attention_blocks, continuous_action_head, image_encoder, readout_encoder, and a 12-block T5-style text_encoder) to a PLACEHOLDER:// reference resolving to the matching per-array directory; the raw blob is truncated in this view.]
r.0.SelfAttention.k.kernel�o��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.o.kernel�q��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.q.kernel�v��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.0.SelfAttention.v.kernel�layer_norm��weight�QPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.0.layer_norm.weight�1��DenseReluDense��wi��kernel�XPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.1.DenseReluDense.wi.kernel�wo��kernel�XPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.1.DenseReluDense.wo.kernel�layer_norm��weight�QPLACEHOLDER://params.text_encoder.model.encoder.block.8.layer.1.layer_norm.weight�9��layer��0��SelfAttention��k��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.k.kernel�o��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.o.kernel�q��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.q.kernel�v��kernel�VPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.0.SelfAttention.v.kernel�layer_norm��weight�QPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.0.layer_norm.weight�1��DenseReluDense��wi��kernel�XPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.1.DenseReluDense.wi.kernel�wo��kernel�XPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.1.DenseReluDense.wo.kernel�layer_norm��weight�QPLACEHOLDER://params.text_encoder.model.encoder.block.9.layer.1.layer_norm.weight�final_layer_norm��weight�GPLACEHOLDER://params.text_encoder.model.encoder.final_layer_norm.weight�shared��embedding�8PLACEHOLDER://params.text_encoder.model.shared.embedding�rngs��diffusion�PLACEHOLDER://rngs.diffusion�dropout�PLACEHOLDER://rngs.dropout�params�PLACEHOLDER://rngs.params�patch_encoding�!PLACEHOLDER://rngs.patch_encoding�step�PLACEHOLDER://step�text_tokenize_fn�
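Note for readers browsing this upload: the `default/checkpoint` file above is a msgpack-serialized parameter tree in which large arrays are replaced by PLACEHOLDER:// references, and each `default/...` directory listed below is a zarr v2 array (a `.zarray` metadata file plus zstd-compressed chunk files) holding the referenced values, in the layout produced by Flax/Orbax-style checkpointing. As a minimal, illustrative sketch only (assuming the repository is cloned locally and the zarr-python package is installed; the paths come from this listing, everything else is hypothetical), the scalar metric arrays can be inspected like this:

    import zarr  # zarr-python; reads the .zarray metadata and zstd-compressed chunks

    # Each metrics entry below is a 0-d (scalar) zarr array, per its .zarray file.
    loss_total = zarr.open("default/metrics.loss.total", mode="r")   # float32 scalar
    loss_count = zarr.open("default/metrics.loss.count", mode="r")   # int32 scalar

    # 0-d arrays are read with an empty index; if these accumulate a running
    # total and count, their ratio is the average training loss.
    print(float(loss_total[...]) / int(loss_count[...]))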
default/metrics._reduction_counter.value/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<i4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/metrics._reduction_counter.value/0 ADDED
Binary file (13 Bytes).
 
default/metrics.loss.count/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<i4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/metrics.loss.count/0 ADDED
Binary file (13 Bytes).
 
default/metrics.loss.total/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/metrics.loss.total/0 ADDED
Binary file (13 Bytes).
 
default/opt_state.1.0.count/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<i4","fill_value":null,"filters":null,"order":"C","shape":[],"zarr_format":2}
default/opt_state.1.0.count/0 ADDED
Binary file (13 Bytes).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.bias/0.0 ADDED
Binary file (34.1 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_0.scale/0.0 ADDED
Binary file (34.2 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.bias/0.0 ADDED
Binary file (34 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.LayerNorm_1.scale/0.0 ADDED
Binary file (34.2 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,3072],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,3072],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.bias/0.0 ADDED
Binary file (137 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768,3072],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,3072],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_0.kernel/0.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:550762c8e85c42b7c34d9f9e5758298f613be016f86825fba6473fcf125aca2b
+ size 105071752
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.bias/0.0 ADDED
Binary file (21.7 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,3072,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,3072,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MLPBlock_0.Dense_1.kernel/0.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:251df3376e2557d4ae235d17460ff648770ea5d96cb5cf04b7f4d47b2d9f4f29
+ size 104935763
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.bias/0.0.0 ADDED
Binary file (34.4 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.key.kernel/0.0.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0433c986195b192bb1c90a1d3a1135596d79d2a8fb7610bb97bebffed16bbf10
+ size 26210485
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.bias/0.0 ADDED
Binary file (21.9 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,12,64,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.out.kernel/0.0.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3eb2f7bd158dd7cc2837d32a8213ea43193a1aab005cad769803ab12b9a6f85
+ size 26202353
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.bias/0.0.0 ADDED
Binary file (34.2 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.query.kernel/0.0.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35e6c5cd62b71f2b764855e1682f1dc56eedcbb5a31818f711945df34800e746
+ size 26216435
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.bias/0.0.0 ADDED
Binary file (34.1 kB).
 
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[12,768,12,64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[12,768,12,64],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.ScanEncoder1DBlock_0.MultiHeadDotProductAttention_0.value.kernel/0.0.0.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2554ac75f75e2df2e23734418b61d2a951494ccf6785b06c1654c35364075944
+ size 26184149
default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[1,74,768],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[1,74,768],"zarr_format":2}
default/opt_state.1.0.mu.attention_blocks.posembed_input.pos_embedding/0.0.0 ADDED
Binary file (211 kB).
 
default/opt_state.1.0.mu.continuous_action_head.Dense_0.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[7],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[7],"zarr_format":2}
default/opt_state.1.0.mu.continuous_action_head.Dense_0.bias/0 ADDED
Binary file (37 Bytes).
 
default/opt_state.1.0.mu.continuous_action_head.Dense_0.kernel/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[768,7],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[768,7],"zarr_format":2}
default/opt_state.1.0.mu.continuous_action_head.Dense_0.kernel/0.0 ADDED
Binary file (19.8 kB).
 
default/opt_state.1.0.mu.image_encoder.embedding_function.Conv_0.bias/.zarray ADDED
@@ -0,0 +1 @@
+ {"chunks":[64],"compressor":{"id":"zstd","level":1},"dimension_separator":".","dtype":"<f4","fill_value":null,"filters":null,"order":"C","shape":[64],"zarr_format":2}
default/opt_state.1.0.mu.image_encoder.embedding_function.Conv_0.bias/0 ADDED
Binary file (265 Bytes).