Commit d634a5b (verified) · 1 Parent(s): f21e813
thorirhrafn committed: Training in progress, epoch 3
README.md CHANGED
@@ -1,9 +1,8 @@
 ---
 license: apache-2.0
-library_name: peft
+base_model: AI-Sweden-Models/gpt-sw3-1.3b
 tags:
 - generated_from_trainer
-base_model: AI-Sweden-Models/gpt-sw3-1.3b
 model-index:
 - name: gpt_icesum
   results: []
@@ -16,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [AI-Sweden-Models/gpt-sw3-1.3b](https://huggingface.co/AI-Sweden-Models/gpt-sw3-1.3b) on the None dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.7838
+- Loss: 1.7960
 
 ## Model description
 
@@ -35,7 +34,7 @@ More information needed
 ### Training hyperparameters
 
 The following hyperparameters were used during training:
-- learning_rate: 5e-05
+- learning_rate: 1e-05
 - train_batch_size: 4
 - eval_batch_size: 4
 - seed: 42
@@ -47,21 +46,20 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss |
 |:-------------:|:-----:|:----:|:---------------:|
-| 1.9006 | 0.22 | 50 | 1.8021 |
-| 1.907 | 0.44 | 100 | 1.7894 |
-| 1.815 | 0.67 | 150 | 1.7845 |
-| 2.0118 | 0.89 | 200 | 1.7850 |
-| 1.7555 | 1.11 | 250 | 1.7863 |
-| 1.8844 | 1.33 | 300 | 1.7857 |
-| 1.7689 | 1.56 | 350 | 1.7851 |
-| 1.7703 | 1.78 | 400 | 1.7838 |
-| 1.8758 | 2.0 | 450 | 1.7838 |
+| 1.9148 | 0.22 | 50 | 1.8159 |
+| 1.9237 | 0.44 | 100 | 1.8051 |
+| 1.8317 | 0.67 | 150 | 1.8006 |
+| 2.0264 | 0.89 | 200 | 1.7985 |
+| 1.81 | 1.11 | 250 | 1.7961 |
+| 1.9393 | 1.33 | 300 | 1.7951 |
+| 1.8159 | 1.56 | 350 | 1.7934 |
+| 1.8204 | 1.78 | 400 | 1.7959 |
+| 1.9231 | 2.0 | 450 | 1.7960 |
 
 
 ### Framework versions
 
-- PEFT 0.8.2
 - Transformers 4.38.1
 - Pytorch 2.2.0+cu118
 - Datasets 2.17.1
-- Tokenizers 0.15.2
+- Tokenizers 0.15.2
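The README changes above lower the learning rate from 5e-05 to 1e-05 and report the new evaluation loss of 1.7960. A minimal `TrainingArguments` sketch matching the hyperparameters listed in the README; the dataset, the PEFT wrapping, and any settings outside the visible hunks are assumptions, not the author's actual script:

```python
# Sketch only. Values taken from the updated README; num_train_epochs is inferred
# from the commit message ("epoch 3") and eval_steps from the 50-step cadence of
# the results table -- both are assumptions.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="gpt_icesum",
    learning_rate=1e-5,              # changed from 5e-5 in this commit
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    num_train_epochs=3,              # assumption, see commit message
    evaluation_strategy="steps",     # argument name as of transformers 4.38
    eval_steps=50,
    logging_steps=50,
)
```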
adapter_config.json CHANGED
@@ -9,13 +9,13 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 512,
+  "lora_alpha": 1024,
   "lora_dropout": 0.1,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 512,
+  "r": 1024,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5191f86f73f75eaadefb7b52bef891a798b837d288cadaa99f589348c1e599ec
-size 201333040
+oid sha256:aea17e760ccaa177a4552a0a7e843a004d2cd263e24ceabd7aa70cc8934a548a
+size 402659704
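The adapter checkpoint roughly doubles in size (201 MB → 403 MB), which is exactly what the rank change predicts, since LoRA parameter count grows linearly with r:

```python
# For an adapted weight of shape (d_out, d_in), LoRA adds r * (d_in + d_out) parameters,
# so doubling r from 512 to 1024 should roughly double the adapter file.
old_size, new_size = 201_333_040, 402_659_704   # bytes, from the LFS pointers above
print(new_size / old_size)                       # ~2.0
```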
config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "AI-Sweden-Models/gpt-sw3-1.3b",
+  "activation_function": "gelu",
+  "apply_query_key_layer_scaling": true,
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 2,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 3,
+  "initializer_range": 0.01,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 2048,
+  "n_embd": 2048,
+  "n_head": 32,
+  "n_inner": 8192,
+  "n_layer": 24,
+  "n_positions": 2048,
+  "normalize_attention_scores": true,
+  "pad_token_id": 0,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "tokenizer_class": "GPTSw3Tokenizer",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.38.1",
+  "use_cache": false,
+  "vocab_size": 64000
+}
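The base-model configuration is now stored in the repo alongside the adapter, so it can be inspected directly. A quick check; the repo id `thorirhrafn/gpt_icesum` is an assumption based on the author and model name:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("thorirhrafn/gpt_icesum")  # assumed repo id
print(cfg.model_type, cfg.n_layer, cfg.n_embd, cfg.n_head, cfg.vocab_size)
# gpt2 24 2048 32 64000
```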
generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "eos_token_id": 3,
+  "pad_token_id": 0,
+  "transformers_version": "4.38.1",
+  "use_cache": false
+}
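The new generation_config.json records the special-token ids (bos=2, eos=3, pad=0) and disables the KV cache by default; `generate()` reads these automatically. A small sketch to verify, again with the repo id assumed:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("thorirhrafn/gpt_icesum")  # assumed repo id
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id, gen_cfg.pad_token_id, gen_cfg.use_cache)
# 2 3 0 False
```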
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8974be470606030125c39d48bc73e65ae112771a3d00d092853186840549b159
+size 2889101840
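A full set of model weights (2,889,101,840 bytes) is now pushed in addition to the adapter. At 2 bytes per bfloat16 parameter that is roughly 1.44B parameters, consistent with the gpt-sw3-1.3b base. A loading sketch; the repo id is assumed, and the commit does not state whether the adapter is already merged into these weights:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

print(2_889_101_840 / 2 / 1e9)   # ~1.44 billion bfloat16 parameters

tok = AutoTokenizer.from_pretrained("thorirhrafn/gpt_icesum")          # assumed repo id
model = AutoModelForCausalLM.from_pretrained(
    "thorirhrafn/gpt_icesum", torch_dtype=torch.bfloat16
)
inputs = tok("Example prompt:", return_tensors="pt")  # placeholder prompt, not a documented format
out = model.generate(**inputs, max_new_tokens=50)
print(tok.decode(out[0]))
```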
runs/Mar25_15-40-59_gpu-4/events.out.tfevents.1711381259.gpu-4.353385.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a0dcca76995e984623df7398a38a1e7add99b20bd780fa12061ab6b0ff1a8bca
+size 26546
runs/Mar25_15-51-22_gpu-4/events.out.tfevents.1711381883.gpu-4.353758.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed307a5f731e995e650bdfa1c9227b569e1e1364cb6b4e5f1bb7dbf99b96a1c4
+size 37125
runs/Mar25_16-20-40_gpu-4/events.out.tfevents.1711383641.gpu-4.354359.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9de70860d9f31fd20d1d76380dfd389e604666b897bba568e0a77e3cce58cdd7
+size 37125
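The three runs/ entries are TensorBoard event files from this training session. One way to read the logged scalars programmatically from a local clone of the repo; the tag names are the ones the HF Trainer typically writes and are an assumption here:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Mar25_16-20-40_gpu-4")   # path inside a local clone
ea.Reload()
print(ea.Tags()["scalars"])            # e.g. ['train/loss', 'eval/loss', ...]
for event in ea.Scalars("eval/loss"):  # assumed tag name
    print(event.step, event.value)
```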
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:35426744f7e2ea623fe429a0fdfb12d139932b3f2bf8bc8cb260147ec97a0cc0
+oid sha256:19f6e5844e102fa35f763b9a71f2d6fbe159f5876ebaa9c1917a40b59c5307f1
 size 4920
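training_args.bin is the pickled `TrainingArguments` object, so the full hyperparameter set behind the README summary can be recovered from it (only unpickle files from sources you trust):

```python
import torch

# Requires transformers to be installed, since the pickle references TrainingArguments.
args = torch.load("training_args.bin")
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs, args.seed)
```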