Michelvh committed on
Commit
6cbab1c
1 Parent(s): 1cdcaf4

qlora-llama2-7b-question-generation-eduqg

Browse files
README.md CHANGED
@@ -39,7 +39,7 @@ The following hyperparameters were used during training:
39
  - total_train_batch_size: 4
40
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
41
  - lr_scheduler_type: linear
42
- - num_epochs: 7
43
 
44
  ### Training results
45
 
@@ -47,7 +47,7 @@ The following hyperparameters were used during training:
47
 
48
  ### Framework versions
49
 
50
- - Transformers 4.34.0.dev0
51
  - Pytorch 2.0.0
52
  - Datasets 2.1.0
53
- - Tokenizers 0.14.0
 
39
  - total_train_batch_size: 4
40
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
41
  - lr_scheduler_type: linear
42
+ - num_epochs: 9
43
 
44
  ### Training results
45
 
 
47
 
48
  ### Framework versions
49
 
50
+ - Transformers 4.35.0.dev0
51
  - Pytorch 2.0.0
52
  - Datasets 2.1.0
53
+ - Tokenizers 0.14.1
adapter_config.json CHANGED
@@ -16,8 +16,8 @@
16
  "rank_pattern": {},
17
  "revision": null,
18
  "target_modules": [
19
- "q_proj",
20
- "v_proj"
21
  ],
22
  "task_type": "CAUSAL_LM"
23
  }
 
16
  "rank_pattern": {},
17
  "revision": null,
18
  "target_modules": [
19
+ "v_proj",
20
+ "q_proj"
21
  ],
22
  "task_type": "CAUSAL_LM"
23
  }
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a9aa0afbd0ed0c2d13594adde00f24c157f1c15361512c36135b62b5c1f065ce
3
  size 16822989
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9026fa5734c7d8e195d928191eb11d32d5e05eb8455bc9bdc176dca77273c000
3
  size 16822989
special_tokens_map.json CHANGED
@@ -1,6 +1,24 @@
1
  {
2
- "bos_token": "<s>",
3
- "eos_token": "</s>",
 
 
 
 
 
 
 
 
 
 
 
 
4
  "pad_token": "</s>",
5
- "unk_token": "<unk>"
 
 
 
 
 
 
6
  }
 
1
  {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
  "pad_token": "</s>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": false,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
  }
tokenizer.json CHANGED
@@ -1,6 +1,11 @@
1
  {
2
  "version": "1.0",
3
- "truncation": null,
 
 
 
 
 
4
  "padding": null,
5
  "added_tokens": [
6
  {
 
1
  {
2
  "version": "1.0",
3
+ "truncation": {
4
+ "direction": "Right",
5
+ "max_length": 1024,
6
+ "strategy": "LongestFirst",
7
+ "stride": 0
8
+ },
9
  "padding": null,
10
  "added_tokens": [
11
  {
tokenizer_config.json CHANGED
@@ -25,16 +25,15 @@
25
  "special": true
26
  }
27
  },
28
- "additional_special_tokens": [],
29
  "bos_token": "<s>",
30
  "clean_up_tokenization_spaces": false,
31
  "eos_token": "</s>",
32
  "legacy": false,
33
  "model_max_length": 1000000000000000019884624838656,
34
- "pad_token": null,
35
  "padding_side": "right",
36
  "sp_model_kwargs": {},
37
  "tokenizer_class": "LlamaTokenizer",
38
  "unk_token": "<unk>",
39
- "use_default_system_prompt": true
40
  }
 
25
  "special": true
26
  }
27
  },
 
28
  "bos_token": "<s>",
29
  "clean_up_tokenization_spaces": false,
30
  "eos_token": "</s>",
31
  "legacy": false,
32
  "model_max_length": 1000000000000000019884624838656,
33
+ "pad_token": "</s>",
34
  "padding_side": "right",
35
  "sp_model_kwargs": {},
36
  "tokenizer_class": "LlamaTokenizer",
37
  "unk_token": "<unk>",
38
+ "use_default_system_prompt": false
39
  }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6f146539640000aadff9aadf10ec8aafaa5d72d8cc031bff3018a8d6528cdb40
3
  size 4155
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:44ce4fd3212f03b378c2fd5c40f6ddeffb9fba512eb8f1a24e427ac892db9ec5
3
  size 4155