sam2ai committed
Commit 4bfce10 · Parent: 231d872

update model v1.1

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.

Files changed (50)
  1. README.md +19 -1
  2. adapter_config.json +26 -0
  3. adapter_model.bin +3 -0
  4. added_tokens.json +0 -0
  5. checkpoint-4200/README.md +21 -0
  6. checkpoint-4200/adapter_config.json +26 -0
  7. checkpoint-4200/adapter_model.bin +3 -0
  8. checkpoint-4200/adapter_model/README.md +21 -0
  9. checkpoint-4200/adapter_model/adapter_config.json +26 -0
  10. checkpoint-4200/adapter_model/adapter_model.bin +3 -0
  11. checkpoint-4200/optimizer.pt +3 -0
  12. checkpoint-4200/rng_state.pth +3 -0
  13. checkpoint-4200/scheduler.pt +3 -0
  14. checkpoint-4200/trainer_state.json +0 -0
  15. checkpoint-4200/training_args.bin +3 -0
  16. checkpoint-4300/README.md +21 -0
  17. checkpoint-4300/adapter_config.json +26 -0
  18. checkpoint-4300/adapter_model.bin +3 -0
  19. checkpoint-4300/adapter_model/README.md +21 -0
  20. checkpoint-4300/adapter_model/adapter_config.json +26 -0
  21. checkpoint-4300/adapter_model/adapter_model.bin +3 -0
  22. checkpoint-4300/optimizer.pt +3 -0
  23. checkpoint-4300/rng_state.pth +3 -0
  24. checkpoint-4300/scheduler.pt +3 -0
  25. checkpoint-4300/trainer_state.json +0 -0
  26. checkpoint-4300/training_args.bin +3 -0
  27. checkpoint-4400/README.md +21 -0
  28. checkpoint-4400/adapter_config.json +26 -0
  29. checkpoint-4400/adapter_model.bin +3 -0
  30. checkpoint-4400/adapter_model/README.md +21 -0
  31. checkpoint-4400/adapter_model/adapter_config.json +26 -0
  32. checkpoint-4400/adapter_model/adapter_model.bin +3 -0
  33. checkpoint-4400/optimizer.pt +3 -0
  34. checkpoint-4400/rng_state.pth +3 -0
  35. checkpoint-4400/scheduler.pt +3 -0
  36. checkpoint-4400/trainer_state.json +0 -0
  37. checkpoint-4400/training_args.bin +3 -0
  38. checkpoint-4500/README.md +21 -0
  39. checkpoint-4500/adapter_config.json +26 -0
  40. checkpoint-4500/adapter_model.bin +3 -0
  41. checkpoint-4500/adapter_model/README.md +21 -0
  42. checkpoint-4500/adapter_model/adapter_config.json +26 -0
  43. checkpoint-4500/adapter_model/adapter_model.bin +3 -0
  44. checkpoint-4500/optimizer.pt +3 -0
  45. checkpoint-4500/rng_state.pth +3 -0
  46. checkpoint-4500/scheduler.pt +3 -0
  47. checkpoint-4500/trainer_state.json +0 -0
  48. checkpoint-4500/training_args.bin +3 -0
  49. merged/added_tokens.json +0 -0
  50. merged/config.json +35 -0
README.md CHANGED
@@ -1,3 +1,21 @@
  ---
- license: apache-2.0
+ library_name: peft
  ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
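The quantization settings listed in the updated README map directly onto a `BitsAndBytesConfig`. The sketch below is illustrative only and is not part of this commit: it assumes `transformers`, `peft`, `bitsandbytes`, and `accelerate` are installed, and `adapter-repo-id` is a placeholder for wherever this adapter is hosted.

```python
# Minimal sketch (assumption, not shipped in this commit): rebuild the 8-bit
# bitsandbytes config from the README and attach the LoRA adapter for inference.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                       # load_in_8bit: True
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,              # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
)

base = AutoModelForCausalLM.from_pretrained(
    "sam2ai/falcon-extend-odia-1B",  # base_model_name_or_path from adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(
    "sam2ai/falcon-extend-odia-1B", trust_remote_code=True
)

# "adapter-repo-id" is a placeholder for the repo holding adapter_model.bin.
model = PeftModel.from_pretrained(base, "adapter-repo-id")
```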
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
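For readers reproducing the setup, the adapter_config.json above corresponds to roughly the following `peft` `LoraConfig`. This is a hedged sketch mirroring the values in that file, not code shipped in the commit.

```python
# Minimal sketch (assumption): peft LoraConfig equivalent to adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.0,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["dense_4h_to_h", "query_key_value", "dense_h_to_4h", "dense"],
    # word_embeddings and lm_head are trained and saved in full alongside the
    # LoRA weights, which is the main reason adapter_model.bin is ~945 MB
    # rather than a few MB.
    modules_to_save=["word_embeddings", "lm_head"],
)
```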
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db4649ebda64f8104873b766de2cac33ed85af15b186ff7f75a1430888d2bef0
+ size 944870613
added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4200/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4200/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4200/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:353cafb1f3d7cd41fa60e6fd4b96933853b5d188a25feb44c21cdfaf57f0b529
+ size 944870613
checkpoint-4200/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4200/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4200/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:353cafb1f3d7cd41fa60e6fd4b96933853b5d188a25feb44c21cdfaf57f0b529
+ size 944870613
checkpoint-4200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38b09cecbb92c42b69432006dd4c938cf4bce926e8d10d05da2d1bcbc4c29a3e
+ size 473550021
checkpoint-4200/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0549ffe3f8f11d23149dbabc9ee30eae5ffe34d8592388db70037341ac909988
+ size 14575
checkpoint-4200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e3bb019dd16f2307016a33b69ad031d4540a431065366dc837ea05563af1314
+ size 627
checkpoint-4200/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a88651639d77e6f86a5f62efebc899c26ec0e20cb4321d575da0ee0d9922d06c
+ size 4219
checkpoint-4300/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4300/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4300/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a3ea0e283ffd3c66c3cdef2d8b3aa53ad28f89434c94814121777571fffd713
+ size 944870613
checkpoint-4300/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4300/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4300/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a3ea0e283ffd3c66c3cdef2d8b3aa53ad28f89434c94814121777571fffd713
+ size 944870613
checkpoint-4300/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:712db639b784ea5983177a309fa864003dd9c1052256b73ab4690678ebb6fc79
+ size 473550021
checkpoint-4300/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9792e1c65bc3e212da561c7100995c9162f463005027df00d8089cbb2db71537
+ size 14575
checkpoint-4300/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a8d21f13c6b72c564beaac82ae459b6c003076f374620877904c42e8f9274cd
+ size 627
checkpoint-4300/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4300/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a88651639d77e6f86a5f62efebc899c26ec0e20cb4321d575da0ee0d9922d06c
+ size 4219
checkpoint-4400/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4400/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4400/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73115098fed13be40e2a384796b3744f6abcbf0e50db004db6a2a75959422208
+ size 944870613
checkpoint-4400/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4400/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4400/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73115098fed13be40e2a384796b3744f6abcbf0e50db004db6a2a75959422208
+ size 944870613
checkpoint-4400/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eaacc7ba3f9ed2be8ccb6c8b8c9e398b836e0773d0a59fd6f1a55ddc37ffcb45
+ size 473550021
checkpoint-4400/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b32bbef891dbdd3524f32551a03a56bb081b6c86879cea3d24ae28b3c8f4bc5f
+ size 14575
checkpoint-4400/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:560d45bdb08d4dc7b0acfe14e250bc38780d59e465e441aacc6647b05e4ab9a6
+ size 627
checkpoint-4400/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4400/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a88651639d77e6f86a5f62efebc899c26ec0e20cb4321d575da0ee0d9922d06c
+ size 4219
checkpoint-4500/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4500/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4500/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8daa183cee897b649cb966cbaaa148ea1d54104ca60c798539a14ab6e790c879
+ size 944870613
checkpoint-4500/adapter_model/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+ ### Framework versions
+
+
+ - PEFT 0.6.0.dev0
checkpoint-4500/adapter_model/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "bias": "none",
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "modules_to_save": [
+     "word_embeddings",
+     "lm_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "dense_4h_to_h",
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
checkpoint-4500/adapter_model/adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8daa183cee897b649cb966cbaaa148ea1d54104ca60c798539a14ab6e790c879
+ size 944870613
checkpoint-4500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac8ff49b788a7685e1c31a067517183c3decc2043e016e6bd325adaf27d49a7f
+ size 473550021
checkpoint-4500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8a3cf9fa8242eff8140a79dadbeff2dc7684d9dcca2468e4961a1204d427d1d
+ size 14575
checkpoint-4500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75ae16dfa38cdd898f8930625c03ddb0d2a7249a2921c37d5b403a3cc48bc7e3
+ size 627
checkpoint-4500/trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-4500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a88651639d77e6f86a5f62efebc899c26ec0e20cb4321d575da0ee0d9922d06c
+ size 4219
merged/added_tokens.json ADDED
The diff for this file is too large to render. See raw diff
 
merged/config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "_name_or_path": "sam2ai/falcon-extend-odia-1B",
+   "alibi": true,
+   "apply_residual_connection_post_layernorm": false,
+   "architectures": [
+     "FalconForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "tiiuae/falcon-rw-1b--configuration_falcon.FalconConfig",
+     "AutoModel": "tiiuae/falcon-rw-1b--modeling_falcon.FalconModel",
+     "AutoModelForCausalLM": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForCausalLM",
+     "AutoModelForQuestionAnswering": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForQuestionAnswering",
+     "AutoModelForSequenceClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForSequenceClassification",
+     "AutoModelForTokenClassification": "tiiuae/falcon-rw-1b--modeling_falcon.FalconForTokenClassification"
+   },
+   "bias": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_dropout": 0.0,
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "falcon",
+   "multi_query": false,
+   "new_decoder_architecture": false,
+   "num_attention_heads": 32,
+   "num_hidden_layers": 24,
+   "num_kv_heads": 32,
+   "parallel_attn": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.0.dev0",
+   "use_cache": false,
+   "vocab_size": 54594
+ }
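The merged/ directory holds the adapter merged back into the base Falcon weights. A minimal, illustrative loading sketch (not part of this commit) follows; `merged-repo-id` is a placeholder for the repo or subfolder containing merged/, and `trust_remote_code=True` is assumed because auto_map points at the tiiuae/falcon-rw-1b modeling code (it may be unnecessary on transformers versions with native Falcon support).

```python
# Minimal sketch (assumption): load the merged full model described by merged/config.json.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "merged-repo-id",            # placeholder for the repo holding merged/
    torch_dtype=torch.float16,   # matches "torch_dtype": "float16"
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("merged-repo-id", trust_remote_code=True)
```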