harishnair04 committed
Commit 34b2082
1 Parent(s): 045a4ac

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+assets/tokenizer/vocabulary.spm filter=lfs diff=lfs merge=lfs -text
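An entry like the one added above is what Git LFS writes when a path is tracked. A minimal sketch of how this line would typically have been produced (the path is taken from this commit; the commands are standard Git LFS usage, not shown in the commit itself):

    git lfs track "assets/tokenizer/vocabulary.spm"
    git add .gitattributes assets/tokenizer/vocabulary.spm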
README.md ADDED
@@ -0,0 +1,28 @@
+---
+library_name: keras-hub
+pipeline_tag: text-generation
+---
+This is a [`Gemma` model](https://keras.io/api/keras_hub/models/gemma) uploaded using the KerasHub library and can be used with JAX, TensorFlow, and PyTorch backends.
+This model is related to a `CausalLM` task.
+
+Model config:
+* **name:** gemma_backbone
+* **trainable:** True
+* **vocabulary_size:** 256000
+* **num_layers:** 18
+* **num_query_heads:** 8
+* **num_key_value_heads:** 1
+* **hidden_dim:** 2048
+* **intermediate_dim:** 32768
+* **head_dim:** 256
+* **layer_norm_epsilon:** 1e-06
+* **dropout:** 0
+* **query_head_dim_normalize:** True
+* **use_post_ffw_norm:** False
+* **use_post_attention_norm:** False
+* **final_logit_soft_cap:** None
+* **attention_logit_soft_cap:** None
+* **sliding_window_size:** 4096
+* **use_sliding_window_attention:** False
+
+This model card has been generated automatically and should be completed by the model author. See [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
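As the README notes, the upload can be loaded back through KerasHub on any of the three backends. A minimal sketch, assuming the model lives at a Hugging Face repo id of the form hf://<user>/<repo> (the actual id is not shown in this commit):

    import keras_hub

    # Hypothetical repo id; substitute this model's real Hugging Face path.
    causal_lm = keras_hub.models.GemmaCausalLM.from_preset("hf://<user>/<repo>")
    print(causal_lm.generate("What is Keras?", max_length=64))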
assets/tokenizer/vocabulary.spm ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61a7b147390c64585d6c3543dd6fc636906c9af3865a5548f27f31aee1d4c8e2
+size 4241003
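The file itself is a SentencePiece model; the three lines above are only the LFS pointer, and `git lfs pull` fetches the real ~4.2 MB proto. A hedged sketch of inspecting it with the sentencepiece library:

    import sentencepiece as spm

    # Assumes the LFS object has been pulled, not just the pointer file.
    sp = spm.SentencePieceProcessor(model_file="assets/tokenizer/vocabulary.spm")
    print(sp.vocab_size())                         # expected to match vocabulary_size=256000
    print(sp.encode("Hello world", out_type=str))  # subword pieces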
config.json ADDED
@@ -0,0 +1,25 @@
+{
+    "module": "keras_hub.src.models.gemma.gemma_backbone",
+    "class_name": "GemmaBackbone",
+    "config": {
+        "name": "gemma_backbone",
+        "trainable": true,
+        "vocabulary_size": 256000,
+        "num_layers": 18,
+        "num_query_heads": 8,
+        "num_key_value_heads": 1,
+        "hidden_dim": 2048,
+        "intermediate_dim": 32768,
+        "head_dim": 256,
+        "layer_norm_epsilon": 1e-06,
+        "dropout": 0,
+        "query_head_dim_normalize": true,
+        "use_post_ffw_norm": false,
+        "use_post_attention_norm": false,
+        "final_logit_soft_cap": null,
+        "attention_logit_soft_cap": null,
+        "sliding_window_size": 4096,
+        "use_sliding_window_attention": false
+    },
+    "registered_name": "keras_hub>GemmaBackbone"
+}
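This is the standard Keras 3 serialization envelope (module / class_name / config / registered_name), so the architecture can be rebuilt directly from the file. A minimal sketch, assuming keras and keras_hub are installed (importing keras_hub registers keras_hub>GemmaBackbone with the deserializer):

    import json
    import keras
    import keras_hub  # noqa: F401  (registers keras_hub>GemmaBackbone)

    with open("config.json") as f:
        cfg = json.load(f)

    # Rebuilds the 18-layer, 2048-dim backbone with fresh weights;
    # the trained weights load separately from model.weights.h5.
    backbone = keras.saving.deserialize_keras_object(cfg)
    backbone.summary()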
metadata.json ADDED
@@ -0,0 +1,6 @@
+{
+    "keras_version": "3.6.0",
+    "keras_hub_version": "0.17.0",
+    "parameter_count": 2508900352,
+    "date_saved": "2024-11-01@16:40:25"
+}
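The metadata pins the library versions used at save time and records roughly 2.5B parameters, i.e. Gemma 2B scale. A small sketch of a compatibility check against the locally installed versions:

    import json
    import keras
    import keras_hub

    with open("metadata.json") as f:
        meta = json.load(f)

    # Saved with Keras 3.6.0 / KerasHub 0.17.0; older local versions may fail to load.
    print("saved:", meta["keras_version"], meta["keras_hub_version"])
    print("local:", keras.__version__, keras_hub.__version__)
    print(f"{meta['parameter_count']:,} parameters")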
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0decc3f3937cf2341b35ae9ac2abff03fdd7a24e23c066df818c81f0d86ea12b
+size 10025273304
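The weights are a ~10 GB Keras 3 .weights.h5 archive behind LFS. A hedged sketch for inspecting its layout once the real file is fetched:

    import h5py

    # Requires `git lfs pull`; the 3-line pointer above is not a valid HDF5 file.
    with h5py.File("model.weights.h5", "r") as f:
        f.visit(print)  # walk and print every group/dataset path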
preprocessor.json ADDED
@@ -0,0 +1,43 @@
+{
+    "module": "keras_hub.src.models.gemma.gemma_causal_lm_preprocessor",
+    "class_name": "GemmaCausalLMPreprocessor",
+    "config": {
+        "name": "gemma_causal_lm_preprocessor",
+        "trainable": true,
+        "dtype": {
+            "module": "keras",
+            "class_name": "DTypePolicy",
+            "config": {
+                "name": "float32"
+            },
+            "registered_name": null
+        },
+        "tokenizer": {
+            "module": "keras_hub.src.models.gemma.gemma_tokenizer",
+            "class_name": "GemmaTokenizer",
+            "config": {
+                "name": "gemma_tokenizer",
+                "trainable": true,
+                "dtype": {
+                    "module": "keras",
+                    "class_name": "DTypePolicy",
+                    "config": {
+                        "name": "int32"
+                    },
+                    "registered_name": null
+                },
+                "config_file": "tokenizer.json",
+                "proto": null,
+                "sequence_length": null,
+                "add_bos": false,
+                "add_eos": false
+            },
+            "registered_name": "keras_hub>GemmaTokenizer"
+        },
+        "config_file": "preprocessor.json",
+        "sequence_length": 512,
+        "add_start_token": true,
+        "add_end_token": true
+    },
+    "registered_name": "keras_hub>GemmaCausalLMPreprocessor"
+}
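Note the split of responsibilities in this config: the inner tokenizer has add_bos/add_eos disabled, while the preprocessor itself adds the start/end tokens and packs sequences to sequence_length=512. A minimal usage sketch, again assuming a hypothetical hf://<user>/<repo> id:

    import keras_hub

    preproc = keras_hub.models.GemmaCausalLMPreprocessor.from_preset("hf://<user>/<repo>")
    # Returns packed (features, labels, sample_weights) for causal-LM training.
    x, y, sw = preproc(["The quick brown fox"])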
task.json ADDED
@@ -0,0 +1,76 @@
+{
+    "module": "keras_hub.src.models.gemma.gemma_causal_lm",
+    "class_name": "GemmaCausalLM",
+    "config": {
+        "backbone": {
+            "module": "keras_hub.src.models.gemma.gemma_backbone",
+            "class_name": "GemmaBackbone",
+            "config": {
+                "name": "gemma_backbone",
+                "trainable": true,
+                "vocabulary_size": 256000,
+                "num_layers": 18,
+                "num_query_heads": 8,
+                "num_key_value_heads": 1,
+                "hidden_dim": 2048,
+                "intermediate_dim": 32768,
+                "head_dim": 256,
+                "layer_norm_epsilon": 1e-06,
+                "dropout": 0,
+                "query_head_dim_normalize": true,
+                "use_post_ffw_norm": false,
+                "use_post_attention_norm": false,
+                "final_logit_soft_cap": null,
+                "attention_logit_soft_cap": null,
+                "sliding_window_size": 4096,
+                "use_sliding_window_attention": false
+            },
+            "registered_name": "keras_hub>GemmaBackbone"
+        },
+        "preprocessor": {
+            "module": "keras_hub.src.models.gemma.gemma_causal_lm_preprocessor",
+            "class_name": "GemmaCausalLMPreprocessor",
+            "config": {
+                "name": "gemma_causal_lm_preprocessor",
+                "trainable": true,
+                "dtype": {
+                    "module": "keras",
+                    "class_name": "DTypePolicy",
+                    "config": {
+                        "name": "float32"
+                    },
+                    "registered_name": null
+                },
+                "tokenizer": {
+                    "module": "keras_hub.src.models.gemma.gemma_tokenizer",
+                    "class_name": "GemmaTokenizer",
+                    "config": {
+                        "name": "gemma_tokenizer",
+                        "trainable": true,
+                        "dtype": {
+                            "module": "keras",
+                            "class_name": "DTypePolicy",
+                            "config": {
+                                "name": "int32"
+                            },
+                            "registered_name": null
+                        },
+                        "config_file": "tokenizer.json",
+                        "proto": null,
+                        "sequence_length": null,
+                        "add_bos": false,
+                        "add_eos": false
+                    },
+                    "registered_name": "keras_hub>GemmaTokenizer"
+                },
+                "config_file": "preprocessor.json",
+                "sequence_length": 512,
+                "add_start_token": true,
+                "add_end_token": true
+            },
+            "registered_name": "keras_hub>GemmaCausalLMPreprocessor"
+        },
+        "name": "gemma_causal_lm"
+    },
+    "registered_name": "keras_hub>GemmaCausalLM"
+}
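task.json nests the backbone and preprocessor configs above into a single GemmaCausalLM, so the task can be restored and fine-tuned end to end. A hedged sketch (hypothetical repo id; KerasHub tasks ship with a default compile, so fit can consume raw strings through the attached preprocessor):

    import keras_hub

    causal_lm = keras_hub.models.GemmaCausalLM.from_preset("hf://<user>/<repo>")
    # The preprocessor tokenizes and packs to 512 tokens, so plain strings work here.
    causal_lm.fit(x=["An example training sentence."], batch_size=1, epochs=1)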
tokenizer.json ADDED
@@ -0,0 +1,22 @@
+{
+    "module": "keras_hub.src.models.gemma.gemma_tokenizer",
+    "class_name": "GemmaTokenizer",
+    "config": {
+        "name": "gemma_tokenizer",
+        "trainable": true,
+        "dtype": {
+            "module": "keras",
+            "class_name": "DTypePolicy",
+            "config": {
+                "name": "int32"
+            },
+            "registered_name": null
+        },
+        "config_file": "tokenizer.json",
+        "proto": null,
+        "sequence_length": null,
+        "add_bos": false,
+        "add_eos": false
+    },
+    "registered_name": "keras_hub>GemmaTokenizer"
+}
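Since add_bos and add_eos are false here, the tokenizer alone produces bare token ids; the special tokens come from the preprocessor layer above. A final sketch of round-tripping text (hypothetical repo id as before):

    import keras_hub

    tokenizer = keras_hub.models.GemmaTokenizer.from_preset("hf://<user>/<repo>")
    ids = tokenizer("Hello world")     # int ids, no BOS/EOS at this layer
    print(tokenizer.detokenize(ids))   # back to "Hello world"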