Duplicate from cortexso/gemma
Browse files
Co-authored-by: Jan Team <jan-ai@users.noreply.huggingface.co>
- .gitattributes +36 -0
- README.md +37 -0
- model.gguf +3 -0
- model.yml +20 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
model.gguf filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: gemma
|
3 |
+
---
|
4 |
+
|
5 |
+
## Overview
|
6 |
+
|
7 |
+
[Gemma](https://huggingface.co/google/gemma-7b) is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. Gemma models are text-to-text, decoder-only large language models, available in 2B and 7B parameter sizes, and support a context length of 8K tokens.
|
8 |
+
|
9 |
+
## Variants
|
10 |
+
|
11 |
+
| No | Variant | Cortex CLI command |
|
12 |
+
| --- | --- | --- |
|
13 |
+
| 1 | [7b-gguf](https://huggingface.co/cortexso/gemma/tree/7b-gguf) | `cortex run gemma:7b-gguf` |
|
14 |
+
| 2 | [7b-onnx](https://huggingface.co/cortexso/gemma/tree/7b-onnx) | `cortex run gemma:7b-onnx` |
|
15 |
+
|
16 |
+
## Use it with Jan (UI)
|
17 |
+
|
18 |
+
1. Install **Jan** using [Quickstart](https://jan.ai/docs/quickstart)
|
19 |
+
2. Use in Jan model Hub:
|
20 |
+
```
|
21 |
+
cortexso/gemma
|
22 |
+
```
|
23 |
+
|
24 |
+
## Use it with Cortex (CLI)
|
25 |
+
|
26 |
+
1. Install **Cortex** using [Quickstart](https://cortex.jan.ai/docs/quickstart)
|
27 |
+
2. Run the model with command:
|
28 |
+
```
|
29 |
+
cortex run gemma
|
30 |
+
```
|
31 |
+
|
32 |
+
## Credits
|
33 |
+
|
34 |
+
- **Author:** Google
|
35 |
+
- **Converter:** [Homebrew](https://www.homebrew.ltd/)
|
36 |
+
- **Original License:** [License](https://ai.google.dev/gemma/terms)
|
37 |
+
- **Papers:** [Gemma Technical Report](https://arxiv.org/abs/2403.08295)
|
model.gguf
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:bd87fc03fe3d651ac419ae35f4a0e6d861167db156572b6c4b90fb81822a3547
|
3 |
+
size 5127231648
|
model.yml
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: gemma
|
2 |
+
model: gemma:7B
|
3 |
+
version: 1
|
4 |
+
|
5 |
+
# Results Preferences
|
6 |
+
stop:
|
7 |
+
- <end_of_turn>
|
8 |
+
- <eos>
|
9 |
+
top_p: 0.95
|
10 |
+
temperature: 0.7
|
11 |
+
frequency_penalty: 0
|
12 |
+
presence_penalty: 0
|
13 |
+
max_tokens: 4096 # Infer from base config.json -> max_position_embeddings
|
14 |
+
stream: true # true | false
|
15 |
+
|
16 |
+
# Engine / Model Settings
|
17 |
+
ngl: 33 # Infer from base config.json -> num_attention_heads
|
18 |
+
ctx_len: 4096 # Infer from base config.json -> max_position_embeddings
|
19 |
+
engine: cortex.llamacpp
|
20 |
+
prompt_template: "<start_of_turn>user\n{prompt}<end_of_turn>\n<start_of_turn>model"
|