evaldas-leliuga committed: Added models

Files changed:
- .gitattributes +12 -0
- README.md +21 -0
- config.json +26 -0
- gemma-2b.Q2_K.gguf +3 -0
- gemma-2b.Q3_K_L.gguf +3 -0
- gemma-2b.Q3_K_M.gguf +3 -0
- gemma-2b.Q3_K_S.gguf +3 -0
- gemma-2b.Q4_0.gguf +3 -0
- gemma-2b.Q4_K_M.gguf +3 -0
- gemma-2b.Q4_K_S.gguf +3 -0
- gemma-2b.Q5_0.gguf +3 -0
- gemma-2b.Q5_K_M.gguf +3 -0
- gemma-2b.Q5_K_S.gguf +3 -0
- gemma-2b.Q6_K.gguf +3 -0
- gemma-2b.Q8_0.gguf +3 -0
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+gemma-2b.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
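These .gitattributes rules route every added *.gguf file through Git LFS, so a plain clone only fetches small pointer stubs rather than the multi-gigabyte weights. A minimal sketch of pulling a single quantized file instead of cloning the whole repository, using huggingface_hub; the repo_id below is an assumption, substitute the actual namespace/name of this repo:

    from huggingface_hub import hf_hub_download  # pip install huggingface_hub

    # Download one quantized GGUF file; the resolved file (not the LFS pointer)
    # is placed in the local Hugging Face cache and its path is returned.
    # NOTE: repo_id is assumed for illustration.
    path = hf_hub_download(
        repo_id="leliuga/gemma-2b-GGUF",
        filename="gemma-2b.Q4_K_M.gguf",
    )
    print(path)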
README.md ADDED
@@ -0,0 +1,21 @@
+---
+base_model: google/gemma-2b
+license: other
+license_name: gemma-terms-of-use
+license_link: https://ai.google.dev/gemma/terms
+inference: false
+model_creator: Google
+model_name: gemma-2b
+quantized_by: Leliuga
+pipeline_tag: text-generation
+tags:
+- gemma
+- gguf
+---
+# gemma-2b - GGUF
+- Model creator: [Google](https://huggingface.co/google)
+- Original model: [gemma-2b](https://huggingface.co/google/gemma-2b)
+
+## Description
+
+This repo contains GGUF format model files for [gemma-2b](https://huggingface.co/google/gemma-2b).
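GGUF files like these load with any llama.cpp-compatible runtime. A minimal sketch using the llama-cpp-python binding; the file name, context size, and prompt are illustrative choices, not something this repo prescribes:

    from llama_cpp import Llama  # pip install llama-cpp-python

    # Load one of the quantized files from this repo. Q4_K_M is a common
    # size/quality trade-off; the path assumes the file was downloaded locally.
    llm = Llama(model_path="gemma-2b.Q4_K_M.gguf", n_ctx=2048)

    # Plain text completion (gemma-2b is a base model, not instruction-tuned).
    out = llm("The capital of Lithuania is", max_tokens=16)
    print(out["choices"][0]["text"])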
config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "architectures": [
+    "GemmaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 2,
+  "eos_token_id": 1,
+  "head_dim": 256,
+  "hidden_act": "gelu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 16384,
+  "max_position_embeddings": 8192,
+  "model_type": "gemma",
+  "num_attention_heads": 8,
+  "num_hidden_layers": 18,
+  "num_key_value_heads": 1,
+  "pad_token_id": 0,
+  "rms_norm_eps": 1e-06,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "torch_dtype": "bfloat16",
+  "use_cache": true,
+  "vocab_size": 256000
+}
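The config describes the base architecture these quantizations were made from: 18 layers, hidden size 2048, 8 attention heads of dim 256 sharing a single KV head (multi-query attention), and a 256k vocabulary. A rough parameter-count sanity check from these fields; this assumes the gated (GeGLU) MLP and tied embeddings of the reference Gemma implementation and ignores norm weights, so treat it as an estimate rather than an official figure:

    # Rough parameter estimate from config.json (assumptions noted above).
    hidden, inter, layers = 2048, 16384, 18
    heads, head_dim, kv_heads, vocab = 8, 256, 1, 256000

    embed = vocab * hidden                      # token embeddings (tied output head)
    attn  = hidden * heads * head_dim           # Q projection
    attn += 2 * hidden * kv_heads * head_dim    # K and V (single KV head)
    attn += heads * head_dim * hidden           # output projection
    mlp   = 3 * hidden * inter                  # gate, up, and down projections
    total = embed + layers * (attn + mlp)
    print(f"~{total / 1e9:.2f}B parameters")    # roughly 2.5B, consistent with "gemma-2b"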
gemma-2b.Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fe94b75a14647d3b15e62afefde54f670754f50fa333bbe2023102e717d580b
+size 1157923936
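Each .gguf entry in this commit is a Git LFS pointer rather than the weights themselves: it records the LFS spec version, the sha256 object id, and the byte size of the real file. A small sketch for checking a downloaded copy against the digest and size listed above; the local path is an assumption:

    import hashlib
    import os

    # Values copied from the LFS pointer for gemma-2b.Q2_K.gguf above.
    expected_sha256 = "9fe94b75a14647d3b15e62afefde54f670754f50fa333bbe2023102e717d580b"
    expected_size = 1157923936

    path = "gemma-2b.Q2_K.gguf"  # assumed local download location
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)

    assert os.path.getsize(path) == expected_size, "size mismatch"
    assert h.hexdigest() == expected_sha256, "sha256 mismatch"
    print("OK: file matches the LFS pointer")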
gemma-2b.Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:003fd910720a95d5e4a539b30cd5315168fc2af389d9519ca73c1e372f42d26d
+size 1465590880
gemma-2b.Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a63d0c3da2045da968b842a77b9669c293724f381dd10522ae2bcbc15546c90
+size 1383801952
gemma-2b.Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7ba57426d42e23ff8ac885452c1909a1fc39d0ad6d69e49a3abdd3e99b21377
+size 1287980128
gemma-2b.Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86b26c9c223236b568b11a3d0ec66fd7b52431d7f9f18e0780c16af54dc2d8af
+size 1551189088
gemma-2b.Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f0bc6672239d48a84cabe9f5a977b3eb801048be80280da17a5f589664aebe4
+size 1630262368
gemma-2b.Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:96eab41bb9290bfcae482ff4d819a19b372b884ae5eff8a869d4806d9e1544cf
+size 1559839840
gemma-2b.Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ebb76dc9e80b68d39258725450ac6b0faba9e4955c3fbf2d543f284d3e0e803
+size 1798915168
gemma-2b.Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e2549fad3cbeb5a48afa6cb096a1edc428f6ad1bd03de0b0880788c56273e90
+size 1839649888
gemma-2b.Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3e41ee27714a3fd6ef75a54863e6c49f1de14512f220ad8f130949142f128c7d
+size 1798915168
gemma-2b.Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d05cf7508e345d470b1a1261f5ede482ba2fe0e08503198073d85d3b2cf4cb0
+size 2062124128
gemma-2b.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:974ad4ce7d332aa4081f393836b950b785d5395b5c7a5021d6b9ec7c92eb8a1d
+size 2669069408