ranystephan committed
Commit 938a732
Parent: 852a624

NeuralFinGPT-v1-10

README.md CHANGED
@@ -18,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [TheBloke/Mistral-7B-Instruct-v0.2-GPTQ](https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GPTQ) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.4749
+ - Loss: 1.1291
 
  ## Model description
 
@@ -46,22 +46,28 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: cosine
  - lr_scheduler_warmup_ratio: 0.03
- - num_epochs: 4
+ - num_epochs: 10
  - mixed_precision_training: Native AMP
 
  ### Training results
 
  | Training Loss | Epoch  | Step | Validation Loss |
  |:-------------:|:------:|:----:|:---------------:|
- | No log        | 0.9560 | 19   | 1.6374          |
- | 1.8305        | 1.9623 | 39   | 1.5187          |
- | 1.5255        | 2.9686 | 59   | 1.4809          |
- | 1.4653        | 3.8239 | 76   | 1.4749          |
+ | No log        | 0.9560 | 19   | 1.6214          |
+ | 1.8609        | 1.9623 | 39   | 1.4791          |
+ | 1.5247        | 2.9686 | 59   | 1.3896          |
+ | 1.4173        | 3.9748 | 79   | 1.3052          |
+ | 1.4173        | 4.9811 | 99   | 1.2385          |
+ | 1.3272        | 5.9874 | 119  | 1.1894          |
+ | 1.248         | 6.9937 | 139  | 1.1540          |
+ | 1.2           | 8.0    | 159  | 1.1351          |
+ | 1.1843        | 8.9560 | 178  | 1.1295          |
+ | 1.1843        | 9.5597 | 190  | 1.1291          |
 
 
  ### Framework versions
 
- - PEFT 0.10.0
+ - PEFT 0.11.1
  - Transformers 4.40.2
  - Pytorch 2.1.0+cu121
  - Datasets 2.19.1
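For orientation, a minimal sketch (not the author's actual training script) of how the hyperparameters named in the README might map onto `transformers.TrainingArguments`. Only the optimizer settings, scheduler, warmup ratio, epoch count, and AMP appear in this diff; the learning rate, batch size, and output directory below are placeholders.

```python
# Sketch only: maps the README's listed hyperparameters onto
# transformers.TrainingArguments. Values marked "placeholder" do not
# appear in this diff and are illustrative, not the real ones.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="out",               # placeholder
    learning_rate=2e-4,             # placeholder: not in this diff
    per_device_train_batch_size=8,  # placeholder: not in this diff
    num_train_epochs=10,            # from the diff: num_epochs 4 -> 10
    lr_scheduler_type="cosine",     # from the README
    warmup_ratio=0.03,              # from the README
    adam_beta1=0.9,                 # README: Adam betas=(0.9, 0.999)
    adam_beta2=0.999,
    adam_epsilon=1e-8,              # README: epsilon=1e-08
    fp16=True,                      # README: "Native AMP" mixed precision
    evaluation_strategy="epoch",    # matches the per-epoch validation rows
)
```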
adapter_config.json CHANGED
@@ -20,8 +20,8 @@
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
-     "q_proj",
-     "v_proj"
+     "v_proj",
+     "q_proj"
    ],
    "task_type": "CAUSAL_LM",
    "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92d8844e85895d9b62580c84721ee4d6561ae3fdc3841884c19d93f44f2e3aa1
+ oid sha256:9aeb34f700a8fd7ed2cf855bd9b3d91363a9a644fa3a090b065312cf7aed7100
  size 109069176
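The adapter weights are a new LFS object with the same 109,069,176-byte size, as expected when an identically shaped LoRA is simply retrained for more epochs. A minimal loading sketch, assuming the repo id `ranystephan/NeuralFinGPT-v1-10` (taken from the commit title; the actual repo id is not shown on this page):

```python
# Sketch: load the retrained adapter on top of its GPTQ base model.
# Requires peft plus a GPTQ backend (e.g. auto-gptq) at runtime.
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(
    "ranystephan/NeuralFinGPT-v1-10",  # assumption: repo id from the commit title
    device_map="auto",                 # requires accelerate
)
```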
runs/May18_17-42-10_d12b2465ac42/events.out.tfevents.1716054148.d12b2465ac42.192.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f090f5b48eb431253e605b2e1b5dcb0b16f26b75ea63d0d40f93500b8251e33
+ size 9786
tokenizer_config.json CHANGED
@@ -1,6 +1,7 @@
  {
    "add_bos_token": true,
-   "add_eos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": true,
    "added_tokens_decoder": {
      "0": {
        "content": "<unk>",
@@ -35,7 +36,6 @@
    "legacy": true,
    "model_max_length": 1000000000000000019884624838656,
    "pad_token": "</s>",
-   "padding_side": "left",
    "sp_model_kwargs": {},
    "spaces_between_special_tokens": false,
    "tokenizer_class": "LlamaTokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:19e4257109a1c5d3b2d518eca4da131e5c14f844531b257ee99da3d60d26d31c
+ oid sha256:af72dd139cdb84c9e1df04e33cec371d1e84cf845137241f245555f9057f98c7
  size 4984