h4rz3rk4s3 committed
Commit 052fb7c
1 Parent(s): f448a61

Upload folder using huggingface_hub

Files changed (4)
  1. README.md +10 -6
  2. config.json +3 -18
  3. generation_config.json +1 -1
  4. model.safetensors +2 -2
README.md CHANGED
@@ -1,19 +1,17 @@
 ---
 license: apache-2.0
-base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 tags:
 - TinyLlama
 - QLoRA
 - Politics
 - EU
 - sft
-language:
-- en
+base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
 ---
 
 # TinyParlaMintLlama-1.1B
 
-TinyParlaMintLlama-1.1B is a QLoRA SFT fine-tune of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) using a sample of a concentrated version of the English [ParlaMint](https://www.clarin.si/repository/xmlui/handle/11356/1864) dataset. The model was fine-tuned for ~12h on one A100 40GB on ~125M tokens.
+TinyParlaMintLlama-1.1B is an SFT fine-tune of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0), trained with QLoRA on a sample of a concentrated version of the English [ParlaMint](https://www.clarin.si/repository/xmlui/handle/11356/1864) dataset. The model was fine-tuned for ~12h on one A100 40GB on ~100M tokens.
 
 The goal of this project is to study the potential for improving the domain-specific (in this case political) knowledge of small (<3B) LLMs by concentrating the training dataset's TF-IDF with respect to the underlying topics found in the original dataset.
 
@@ -24,7 +22,7 @@ The used training data contains speeches from the **Austrian**, **Danish**, **Fr
 
 ```python
 !pip install -qU transformers accelerate
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import transformers
 import torch
 model = "h4rz3rk4s3/TinyParlaMintLlama-1.1B"
@@ -38,10 +36,16 @@ messages = [
 
 tokenizer = AutoTokenizer.from_pretrained(model)
 prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+model = AutoModelForCausalLM.from_pretrained(
+    model, trust_remote_code=True, device_map={"": Accelerator().process_index}
+)
+
 pipeline = transformers.pipeline(
     "text-generation",
+    tokenizer=tokenizer,
     model=model,
-    device_map="auto",
+    torch_dtype=torch.float16,
+    device_map={"": Accelerator().process_index},
 )
 outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
 print(outputs[0]["generated_text"])
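
The updated example calls `Accelerator()` for device placement, but the diff does not add the corresponding import from the `accelerate` package. A minimal, self-contained sketch of the new load step with that import in place (the import and the load-time `torch_dtype` argument are additions for illustration, not part of the committed README):

```python
# Sketch only: the `accelerate` import below is not shown in the committed README.
from accelerate import Accelerator
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_id = "h4rz3rk4s3/TinyParlaMintLlama-1.1B"
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Place the whole model on this process's device, mirroring the diff's
# device_map={"": Accelerator().process_index}; on a single GPU this is
# equivalent to device_map={"": 0}.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map={"": Accelerator().process_index},
)
```

The `transformers.pipeline("text-generation", ...)` call from the README can then be built on top of this model and tokenizer.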
config.json CHANGED
@@ -16,27 +16,12 @@
   "num_attention_heads": 32,
   "num_hidden_layers": 22,
   "num_key_value_heads": 4,
-  "pretraining_tp": 1,
-  "quantization_config": {
-    "_load_in_4bit": true,
-    "_load_in_8bit": false,
-    "bnb_4bit_compute_dtype": "bfloat16",
-    "bnb_4bit_quant_type": "nf4",
-    "bnb_4bit_use_double_quant": false,
-    "llm_int8_enable_fp32_cpu_offload": false,
-    "llm_int8_has_fp16_weight": false,
-    "llm_int8_skip_modules": null,
-    "llm_int8_threshold": 6.0,
-    "load_in_4bit": true,
-    "load_in_8bit": false,
-    "quant_method": "bitsandbytes"
-  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": null,
   "rope_theta": 10000.0,
   "tie_word_embeddings": false,
-  "torch_dtype": "float16",
-  "transformers_version": "4.38.0.dev0",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.0",
   "use_cache": true,
   "vocab_size": 32000
-}
+}
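
Removing the `quantization_config` block means the repository now ships plain bfloat16 weights instead of a bitsandbytes 4-bit checkpoint. If 4-bit inference is still desired, the same settings can be re-applied at load time; a sketch mirroring the removed values (not part of this commit, and requiring the `bitsandbytes` package):

```python
# Optional 4-bit loading that mirrors the removed quantization_config
# (NF4, bfloat16 compute, no double quantization). Sketch only.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=False,
)

model = AutoModelForCausalLM.from_pretrained(
    "h4rz3rk4s3/TinyParlaMintLlama-1.1B",
    quantization_config=bnb_config,
    device_map="auto",
)
```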
generation_config.json CHANGED
@@ -3,5 +3,5 @@
   "eos_token_id": 2,
   "max_length": 2048,
   "pad_token_id": 0,
-  "transformers_version": "4.38.0.dev0"
+  "transformers_version": "4.38.0"
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59b91068db6d7cf982aa85cdb5a1e69db86a439420c79dffa7a5d04f34f45419
-size 807426286
+oid sha256:db30ca6110845908b747a44327b67440a0722e591009392152a815f5de622bfc
+size 2200119864
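
The new file size is consistent with the config change above: roughly 1.1e9 parameters stored as bfloat16 (2 bytes each) come to about 2.2 GB, matching the 2,200,119,864-byte file, while the previous 807,426,286-byte file corresponds to the earlier 4-bit bitsandbytes checkpoint.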