LoneStriker committed on
Commit
51d429f
1 Parent(s): de3c427

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -1,35 +1,10 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-2b-it.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,496 @@
+ ---
+ library_name: transformers
+ tags: []
+ widget:
+ - text: |
+     <start_of_turn>user
+     How does the brain work?<end_of_turn>
+     <start_of_turn>model
+ inference:
+   parameters:
+     max_new_tokens: 200
+ extra_gated_heading: "Access Gemma on Hugging Face"
+ extra_gated_prompt: "To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately."
+ extra_gated_button_content: "Acknowledge license"
+ license: other
+ license_name: gemma-terms-of-use
+ license_link: https://ai.google.dev/gemma/terms
+ ---
+
+ # Gemma Model Card
+
+ **Model Page**: [Gemma](https://ai.google.dev/gemma/docs)
+
+ This model card corresponds to the 2B instruct version of the Gemma model. You can also visit the model cards of the [2B base model](https://huggingface.co/google/gemma-2b), [7B base model](https://huggingface.co/google/gemma-7b), and [7B instruct model](https://huggingface.co/google/gemma-7b-it).
+
+ **Resources and Technical Documentation**:
+
+ * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible)
+ * [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma)
+ * [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335?version=gemma-2b-it-gg-hf)
+
+ **Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent)
+
+ **Authors**: Google
+
+ ## Model Information
+
+ Summary description and brief definition of inputs and outputs.
+
+ ### Description
+
+ Gemma is a family of lightweight, state-of-the-art open models from Google,
+ built from the same research and technology used to create the Gemini models.
+ They are text-to-text, decoder-only large language models, available in English,
+ with open weights, pre-trained variants, and instruction-tuned variants. Gemma
+ models are well-suited for a variety of text generation tasks, including
+ question answering, summarization, and reasoning. Their relatively small size
+ makes it possible to deploy them in environments with limited resources such as
+ a laptop, desktop, or your own cloud infrastructure, democratizing access to
+ state-of-the-art AI models and helping foster innovation for everyone.
+
+ ### Usage
+
+ Below we share some code snippets on how to quickly get started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your use case.
+
+ #### Running the model on a CPU
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it")
+
+ input_text = "Write me a poem about Machine Learning."
+ input_ids = tokenizer(input_text, return_tensors="pt")
+
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```
+
+ #### Running the model on a single / multi GPU
+
+ ```python
+ # pip install accelerate
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto")
+
+ input_text = "Write me a poem about Machine Learning."
+ input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```
+
+ #### Running the model on a GPU using different precisions
+
+ * _Using `torch.float16`_
+
+ ```python
+ # pip install accelerate
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto", torch_dtype=torch.float16)
+
+ input_text = "Write me a poem about Machine Learning."
+ input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```
+
+ * _Using `torch.bfloat16`_
+
+ ```python
+ # pip install accelerate
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", device_map="auto", torch_dtype=torch.bfloat16)
+
+ input_text = "Write me a poem about Machine Learning."
+ input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```
+
+ #### Quantized Versions through `bitsandbytes`
+
+ * _Using 8-bit precision (int8)_
+
+ ```python
+ # pip install bitsandbytes accelerate
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", quantization_config=quantization_config)
+
+ input_text = "Write me a poem about Machine Learning."
+ input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```
+
+ * _Using 4-bit precision_
+
+ ```python
+ # pip install bitsandbytes accelerate
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+ quantization_config = BitsAndBytesConfig(load_in_4bit=True)
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained("google/gemma-2b-it", quantization_config=quantization_config)
+
+ input_text = "Write me a poem about Machine Learning."
+ input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
+
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```
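+
+ If you want finer control over the 4-bit path, `BitsAndBytesConfig` exposes additional knobs. A hedged variant follows; these parameters exist in recent `transformers`/`bitsandbytes` releases, and the values shown are illustrative rather than taken from this card:
+
+ ```python
+ import torch
+ from transformers import BitsAndBytesConfig
+
+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",               # NormalFloat4 quantization
+     bnb_4bit_compute_dtype=torch.bfloat16,   # dtype used for compute
+     bnb_4bit_use_double_quant=True,          # also quantize the quantization constants
+ )
+ ```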
+
+ #### Other optimizations
+
+ * _Flash Attention 2_
+
+ First, make sure to install `flash-attn` in your environment: `pip install flash-attn`
+
+ ```diff
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.float16,
+ +   attn_implementation="flash_attention_2"
+ ).to(0)
+ ```
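+
+ For reference, here is a minimal end-to-end sketch with Flash Attention 2 enabled. It assumes a CUDA GPU that `flash-attn` supports (Ampere or newer) and reuses the `google/gemma-2b-it` checkpoint from the snippets above:
+
+ ```python
+ # pip install accelerate flash-attn
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it")
+ model = AutoModelForCausalLM.from_pretrained(
+     "google/gemma-2b-it",
+     torch_dtype=torch.float16,  # Flash Attention 2 requires fp16 or bf16 weights
+     attn_implementation="flash_attention_2",
+ ).to(0)
+
+ input_ids = tokenizer("Write me a poem about Machine Learning.", return_tensors="pt").to("cuda")
+ outputs = model.generate(**input_ids)
+ print(tokenizer.decode(outputs[0]))
+ ```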
+
+ ### Chat Template
+
+ The instruction-tuned models use a chat template that must be adhered to for conversational use.
+ The easiest way to apply it is using the tokenizer's built-in chat template, as shown in the following snippet.
+
+ Let's load the model and apply the chat template to a conversation. In this example, we'll start with a single user interaction:
+
+ ```py
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ model_id = "google/gemma-2b-it"
+ dtype = torch.bfloat16
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     device_map="cuda",
+     torch_dtype=dtype,
+ )
+
+ chat = [
+     { "role": "user", "content": "Write a hello world program" },
+ ]
+ prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+ ```
+
+ At this point, the prompt contains the following text:
+
+ ```
+ <start_of_turn>user
+ Write a hello world program<end_of_turn>
+ <start_of_turn>model
+ ```
+
+ As you can see, each turn is preceded by a `<start_of_turn>` delimiter and then the role of the entity
+ (either `user`, for content supplied by the user, or `model` for LLM responses). Turns finish with
+ the `<end_of_turn>` token.
+
+ You can follow this format to build the prompt manually, if you need to do it without the tokenizer's
+ chat template, as in the sketch below.
+
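+ A minimal sketch of constructing the same prompt by hand (the `build_prompt` helper is hypothetical, not part of the tokenizer API):
+
+ ```py
+ def build_prompt(chat):
+     """Format a list of {role, content} dicts with Gemma's chat markers."""
+     prompt = ""
+     for turn in chat:
+         prompt += f"<start_of_turn>{turn['role']}\n{turn['content']}<end_of_turn>\n"
+     prompt += "<start_of_turn>model\n"  # cue the model to generate its turn
+     return prompt
+
+ chat = [{"role": "user", "content": "Write a hello world program"}]
+ prompt = build_prompt(chat)  # matches the template text shown above
+ ```
+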
+ After the prompt is ready, generation can be performed like this:
+
+ ```py
+ inputs = tokenizer.encode(prompt, add_special_tokens=True, return_tensors="pt")
+ outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150)
+ ```
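+
+ As in the earlier snippets, you can then decode `outputs` to recover the generated text; `skip_special_tokens=True` simply drops markers such as `<end_of_turn>`:
+
+ ```py
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```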
+
+ ### Inputs and outputs
+
+ * **Input:** Text string, such as a question, a prompt, or a document to be
+   summarized.
+ * **Output:** Generated English-language text in response to the input, such
+   as an answer to a question, or a summary of a document.
+
+ ## Model Data
+
+ Data used for model training and how the data was processed.
+
+ ### Training Dataset
+
+ These models were trained on a dataset of text data that includes a wide variety
+ of sources, totaling 6 trillion tokens. Here are the key components:
+
+ * Web Documents: A diverse collection of web text ensures the model is exposed
+   to a broad range of linguistic styles, topics, and vocabulary. Primarily
+   English-language content.
+ * Code: Exposing the model to code helps it to learn the syntax and patterns of
+   programming languages, which improves its ability to generate code or
+   understand code-related questions.
+ * Mathematics: Training on mathematical text helps the model learn logical
+   reasoning, symbolic representation, and how to address mathematical queries.
+
+ The combination of these diverse data sources is crucial for training a powerful
+ language model that can handle a wide variety of different tasks and text
+ formats.
+
+ ### Data Preprocessing
+
+ Here are the key data cleaning and filtering methods applied to the training
+ data:
+
+ * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was
+   applied at multiple stages in the data preparation process to ensure the
+   exclusion of harmful and illegal content.
+ * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and
+   reliable, automated techniques were used to filter out certain personal
+   information and other sensitive data from training sets.
+ * Additional methods: Filtering based on content quality and safety in line with
+   [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11).
+
+ ## Implementation Information
+
+ Details about the model internals.
+
+ ### Hardware
+
+ Gemma was trained using the latest generation of
+ [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e).
+
+ Training large language models requires significant computational power. TPUs,
+ designed specifically for matrix operations common in machine learning, offer
+ several advantages in this domain:
+
+ * Performance: TPUs are specifically designed to handle the massive computations
+   involved in training LLMs. They can speed up training considerably compared to
+   CPUs.
+ * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing
+   for the handling of large models and batch sizes during training. This can
+   lead to better model quality.
+ * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for
+   handling the growing complexity of large foundation models. You can distribute
+   training across multiple TPU devices for faster and more efficient processing.
+ * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective
+   solution for training large models compared to CPU-based infrastructure,
+   especially when considering the time and resources saved due to faster
+   training.
+
+ These advantages are aligned with
+ [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/).
+
+ ### Software
+
+ Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ml-pathways).
+
+ JAX allows researchers to take advantage of the latest generation of hardware,
+ including TPUs, for faster and more efficient training of large models.
+
+ ML Pathways is Google's latest effort to build artificially intelligent systems
+ capable of generalizing across multiple tasks. This is especially suitable for
+ [foundation models](https://ai.google/discover/foundation-models/), including large language models like
+ these.
+
+ Together, JAX and ML Pathways are used as described in the
+ [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single
+ controller' programming model of Jax and Pathways allows a single Python
+ process to orchestrate the entire training run, dramatically simplifying the
+ development workflow."
+
+ ## Evaluation
+
+ Model evaluation metrics and results.
+
+ ### Benchmark Results
+
+ These models were evaluated against a large collection of different datasets and
+ metrics to cover different aspects of text generation:
+
+ | Benchmark | Metric | 2B Params | 7B Params |
+ | ------------------------------ | ------------- | ----------- | --------- |
+ | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 |
+ | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot | 71.4 | 81.2 |
+ | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 |
+ | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 59.7 | 51.8 |
+ | [BoolQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 |
+ | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 |
+ | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 |
+ | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 |
+ | [ARC-e](https://arxiv.org/abs/1803.05457) | | 73.2 | 81.5 |
+ | [ARC-c](https://arxiv.org/abs/1803.05457) | | 42.1 | 53.2 |
+ | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 |
+ | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | - | 23 |
+ | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 |
+ | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 |
+ | [GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 |
+ | [MATH](https://arxiv.org/abs/2103.03874) | 4-shot | 11.8 | 24.3 |
+ | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 |
+ | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 |
+ | **Average** | | **54.0** | **56.4** |
+
+ ## Ethics and Safety
+
+ Ethics and safety evaluation approach and results.
+
+ ### Evaluation Approach
+
+ Our evaluation methods include structured evaluations and internal red-teaming
+ testing of relevant content policies. Red-teaming was conducted by a number of
+ different teams, each with different goals and human evaluation metrics. These
+ models were evaluated against a number of different categories relevant to
+ ethics and safety, including:
+
+ * Text-to-Text Content Safety: Human evaluation on prompts covering safety
+   policies including child sexual abuse and exploitation, harassment, violence
+   and gore, and hate speech.
+ * Text-to-Text Representational Harms: Benchmark against relevant academic
+   datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2).
+ * Memorization: Automated evaluation of memorization of training data, including
+   the risk of personally identifiable information exposure.
+ * Large-scale harm: Tests for "dangerous capabilities," such as chemical,
+   biological, radiological, and nuclear (CBRN) risks.
+
+ ### Evaluation Results
+
+ The results of ethics and safety evaluations are within acceptable thresholds
+ for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child
+ safety, content safety, representational harms, memorization, and large-scale harms.
+ On top of robust internal evaluations, the results of well-known safety
+ benchmarks like BBQ, BOLD, Winogender, WinoBias, RealToxicity, and TruthfulQA
+ are shown here.
+
+ | Benchmark | Metric | 2B Params | 7B Params |
+ | ------------------------------ | ------------- | ----------- | --------- |
+ | [RealToxicity](https://arxiv.org/abs/2009.11462) | average | 6.86 | 7.90 |
+ | [BOLD](https://arxiv.org/abs/2101.11718) | | 45.57 | 49.08 |
+ | [CrowS-Pairs](https://aclanthology.org/2020.emnlp-main.154/) | top-1 | 45.82 | 51.33 |
+ | [BBQ Ambig](https://arxiv.org/abs/2110.08193v2) | 1-shot, top-1 | 62.58 | 92.54 |
+ | [BBQ Disambig](https://arxiv.org/abs/2110.08193v2) | top-1 | 54.62 | 71.99 |
+ | [Winogender](https://arxiv.org/abs/1804.09301) | top-1 | 51.25 | 54.17 |
+ | [TruthfulQA](https://arxiv.org/abs/2109.07958) | | 44.84 | 31.81 |
+ | [WinoBias 1_2](https://arxiv.org/abs/1804.06876) | | 56.12 | 59.09 |
+ | [WinoBias 2_2](https://arxiv.org/abs/1804.06876) | | 91.10 | 92.23 |
+ | [Toxigen](https://arxiv.org/abs/2203.09509) | | 29.77 | 39.59 |
+
+ ## Usage and Limitations
+
+ These models have certain limitations that users should be aware of.
+
+ ### Intended Usage
+
+ Open Large Language Models (LLMs) have a wide range of applications across
+ various industries and domains. The following list of potential uses is not
+ comprehensive. The purpose of this list is to provide contextual information
+ about the possible use cases that the model creators considered as part of model
+ training and development.
+
+ * Content Creation and Communication
+   * Text Generation: These models can be used to generate creative text formats
+     such as poems, scripts, code, marketing copy, and email drafts.
+   * Chatbots and Conversational AI: Power conversational interfaces for customer
+     service, virtual assistants, or interactive applications.
+   * Text Summarization: Generate concise summaries of a text corpus, research
+     papers, or reports.
+ * Research and Education
+   * Natural Language Processing (NLP) Research: These models can serve as a
+     foundation for researchers to experiment with NLP techniques, develop
+     algorithms, and contribute to the advancement of the field.
+   * Language Learning Tools: Support interactive language learning experiences,
+     aiding in grammar correction or providing writing practice.
+   * Knowledge Exploration: Assist researchers in exploring large bodies of text
+     by generating summaries or answering questions about specific topics.
+
+ ### Limitations
+
+ * Training Data
+   * The quality and diversity of the training data significantly influence the
+     model's capabilities. Biases or gaps in the training data can lead to
+     limitations in the model's responses.
+   * The scope of the training dataset determines the subject areas the model can
+     handle effectively.
+ * Context and Task Complexity
+   * LLMs are better at tasks that can be framed with clear prompts and
+     instructions. Open-ended or highly complex tasks might be challenging.
+   * A model's performance can be influenced by the amount of context provided
+     (longer context generally leads to better outputs, up to a certain point).
+ * Language Ambiguity and Nuance
+   * Natural language is inherently complex. LLMs might struggle to grasp subtle
+     nuances, sarcasm, or figurative language.
+ * Factual Accuracy
+   * LLMs generate responses based on information they learned from their
+     training datasets, but they are not knowledge bases. They may generate
+     incorrect or outdated factual statements.
+ * Common Sense
+   * LLMs rely on statistical patterns in language. They might lack the ability
+     to apply common sense reasoning in certain situations.
+
+ ### Ethical Considerations and Risks
+
+ The development of large language models (LLMs) raises several ethical concerns.
+ In creating an open model, we have carefully considered the following:
+
+ * Bias and Fairness
+   * LLMs trained on large-scale, real-world text data can reflect socio-cultural
+     biases embedded in the training material. These models underwent careful
+     scrutiny; the input data pre-processing is described and posterior evaluations
+     are reported in this card.
+ * Misinformation and Misuse
+   * LLMs can be misused to generate text that is false, misleading, or harmful.
+   * Guidelines for responsible use are provided with the model; see the
+     [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible).
+ * Transparency and Accountability
+   * This model card summarizes details on the models' architecture,
+     capabilities, limitations, and evaluation processes.
+   * A responsibly developed open model offers the opportunity to share
+     innovation by making LLM technology accessible to developers and researchers
+     across the AI ecosystem.
+
+ Risks identified and mitigations:
+
+ * Perpetuation of biases: Continuous monitoring (using evaluation metrics and
+   human review) and the exploration of de-biasing techniques are encouraged
+   during model training, fine-tuning, and other use cases.
+ * Generation of harmful content: Mechanisms and guidelines for content safety
+   are essential. Developers are encouraged to exercise caution and implement
+   appropriate content safety safeguards based on their specific product policies
+   and application use cases.
+ * Misuse for malicious purposes: Technical limitations and developer and
+   end-user education can help mitigate malicious applications of LLMs.
+   Educational resources and reporting mechanisms for users to flag misuse are
+   provided. Prohibited uses of Gemma models are outlined in the
+   [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).
+ * Privacy violations: Models were trained on data filtered for removal of PII
+   (Personally Identifiable Information). Developers are encouraged to adhere to
+   privacy regulations with privacy-preserving techniques.
+
+ ### Benefits
+
+ At the time of release, this family of models provides high-performance open
+ large language model implementations designed from the ground up for Responsible
+ AI development, compared to similarly sized models.
+
+ Using the benchmark evaluation metrics described in this document, these models
+ have been shown to provide superior performance to other, comparably sized open
+ model alternatives.
gemma-2b-it-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01f5b22c477092a7d48c193231497ee27cf10668f62bf0e5a81672d9c342c2ad
+ size 1260907424
gemma-2b-it-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:711c687a3b1a7dea7cafbba7f7d404e3089289cc36d3f22f58499e6acbfd1d5a
+ size 1179118496
gemma-2b-it-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9925113b1253500232bd85521a934ec89982c1f356f28917b169f2a4c19553dd
+ size 1083296672
gemma-2b-it-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76efa7b10f0b259dc1520d9b10b229cf0c5ced0dc5a58d71070930478062ea09
+ size 1495245728
gemma-2b-it-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5c68b20b240c3770a2a681975281672658de43e3ed8a4426172c70d68cd4970
+ size 1424823200
gemma-2b-it-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d66429c2455e7aeac1b1053d11317fbc951e0f893d7ac62b89004251702e33e9
+ size 1770202016
gemma-2b-it-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ddfd2d0485d7397fd41c980ce9ccc9e417bd2a1720da5557164df4ba29dc8230
+ size 1729467296
gemma-2b-it-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96824073d85c708f042588af4f22c8fb3a9e1630f21f7e8bc63777b88bcf8620
+ size 2062343072
gemma-2b-it-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec68b50d23469882716782da8b680402246356c3f984e9a3b9bcc5bc15273140
+ size 2669351840
gemma-2b-it.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e858f591abebb789508f8fa4f1982bb7c818d8f711050f24d6e27a66fd6794d0
+ size 10031780704