anujsahani01 committed
Commit 7c40aca (1 parent: 935737d)

Training in progress, step 500

adapter_config.json CHANGED
@@ -1,6 +1,7 @@
 {
+  "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "Salesforce/codegen-350M-mono",
+  "base_model_name_or_path": "codeparrot/codeparrot-small",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -12,9 +13,10 @@
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 8,
+  "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "qkv_proj"
+    "c_attn"
   ],
   "task_type": "CAUSAL_LM"
 }
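The base-model swap also moves the LoRA injection point: CodeGen exposes a fused qkv_proj module, while the GPT-2-style codeparrot-small fuses its attention projections in c_attn. Below is a minimal sketch of a LoraConfig matching the new adapter_config.json; lora_alpha and lora_dropout are hidden in the collapsed context of this hunk, so those values are assumptions:

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Base model switched from Salesforce/codegen-350M-mono to codeparrot/codeparrot-small.
base = AutoModelForCausalLM.from_pretrained("codeparrot/codeparrot-small")

config = LoraConfig(
    r=8,                        # from the diff
    target_modules=["c_attn"],  # GPT-2-style fused q/k/v projection, from the diff
    bias="none",                # from the diff
    task_type="CAUSAL_LM",      # from the diff
    lora_alpha=16,              # assumption: not visible in the hunk
    lora_dropout=0.05,          # assumption: not visible in the hunk
)
model = get_peft_model(base, config)
model.print_trainable_parameters()
```

The new alpha_pattern and rank_pattern keys are empty defaults written by newer PEFT releases, which suggests the library was upgraded along with the base model.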
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7fbfe3de320fee92686dae4ea51384102e7601e2b2c0241ebb5194094d82261
-size 2635433
+oid sha256:f407981173dc9c72505babb41c0d1741e37fe068d767ca3b40f7038b7687b332
+size 1188025
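The adapter drops from ~2.6 MB to ~1.2 MB because the targeted matrices shrank. A back-of-the-envelope check, assuming codeparrot-small has the GPT-2-small shape (12 layers, hidden size 768, c_attn projecting 768 → 3·768):

```python
# LoRA adds A (d_in x r) and B (r x d_out) per targeted module.
r = 8
layers, d_in, d_out = 12, 768, 3 * 768  # assumed codeparrot-small geometry
params = layers * r * (d_in + d_out)    # 294,912 parameters
print(params * 4)                       # ~1,179,648 bytes in fp32
# Close to the new adapter_model.bin size (1,188,025 bytes; the remainder is
# serialization overhead). The same estimate for codegen-350M-mono
# (20 layers, 1024 -> 3*1024 qkv_proj) gives ~2,621,440 bytes vs. 2,635,433.
```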
added_tokens.json CHANGED
@@ -1,45 +1,7 @@
 {
-  "\t\t": 50294,
-  "\t\t\t": 50293,
-  "\t\t\t\t": 50292,
-  "\t\t\t\t\t": 50291,
-  "\t\t\t\t\t\t": 50290,
-  "\t\t\t\t\t\t\t": 50289,
-  "\t\t\t\t\t\t\t\t": 50288,
-  "\t\t\t\t\t\t\t\t\t": 50287,
-  "  ": 50286,
-  "   ": 50285,
-  "    ": 50284,
-  "     ": 50283,
-  "      ": 50282,
-  "       ": 50281,
-  "        ": 50280,
-  "         ": 50279,
-  "          ": 50278,
-  "           ": 50277,
-  "            ": 50276,
-  "             ": 50275,
-  "              ": 50274,
-  "               ": 50273,
-  "                ": 50272,
-  "                 ": 50271,
-  "                  ": 50270,
-  "                   ": 50269,
-  "                    ": 50268,
-  "                     ": 50267,
-  "                      ": 50266,
-  "                       ": 50265,
-  "                        ": 50264,
-  "                         ": 50263,
-  "                          ": 50262,
-  "                           ": 50261,
-  "                            ": 50260,
-  "                             ": 50259,
-  "                              ": 50258,
-  "                               ": 50257,
-  "<ASSISTANT_TASK:>": 50298,
-  "<END_TASK>": 50299,
-  "<SYSTEM_TASK:>": 50296,
-  "<USER_TASK:>": 50297,
-  "<|PAD|>": 50295
+  "<ASSISTANT_TASK:>": 32771,
+  "<END_TASK>": 32772,
+  "<SYSTEM_TASK:>": 32769,
+  "<USER_TASK:>": 32770,
+  "<|PAD|>": 32768
 }
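The four task markers and the pad token keep their names but move from ids 50295-50299 down to 32768-32772: codeparrot-small's base vocabulary ends at 32767 and, unlike CodeGen's tokenizer, carries no extra tab- and space-run tokens. A hedged sketch of calls that would reproduce these ids (the commit does not show the exact code used):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("codeparrot/codeparrot-small")

# Pad token first (id 32768), then the task markers (32769-32772),
# matching the id order in the new added_tokens.json.
tokenizer.add_special_tokens({"pad_token": "<|PAD|>"})
tokenizer.add_tokens(["<SYSTEM_TASK:>", "<USER_TASK:>",
                      "<ASSISTANT_TASK:>", "<END_TASK>"])

# The embedding matrix must grow to cover the new ids.
model = AutoModelForCausalLM.from_pretrained("codeparrot/codeparrot-small")
model.resize_token_embeddings(len(tokenizer))
```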
merges.txt CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,5 +1,55 @@
 {
   "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32768": {
+      "content": "<|PAD|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32769": {
+      "content": "<SYSTEM_TASK:>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32770": {
+      "content": "<USER_TASK:>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32771": {
+      "content": "<ASSISTANT_TASK:>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "32772": {
+      "content": "<END_TASK>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    }
+  },
   "additional_special_tokens": [
     "<SYSTEM_TASK:>",
     "<USER_TASK:>",
@@ -9,8 +59,8 @@
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
-  "model_max_length": 2048,
+  "model_max_length": 5000,
   "pad_token": "<|PAD|>",
-  "tokenizer_class": "CodeGenTokenizer",
+  "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
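A quick sanity check of the rewritten tokenizer config after downloading a checkpoint; the local path below is illustrative, not taken from the commit:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./checkpoint-500")  # hypothetical path
print(type(tok).__name__)               # typically GPT2TokenizerFast;
                                        # "GPT2Tokenizer" in the config names the slow class
print(tok.pad_token, tok.pad_token_id)  # <|PAD|> 32768
print(tok.model_max_length)             # 5000
```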
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:514277c1178436a679fc1c176f6553af2198dc0889010b1b9d9306673a3e8302
-size 3963
+oid sha256:9c040461485db7bce8feff4c5cffd57b1e1496726da837ee598d72c1d36f4816
+size 4027
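training_args.bin is the pickled TrainingArguments that Trainer drops into every checkpoint, so its hash and size change whenever any argument does. An illustrative reconstruction; only the 500-step cadence is implied by the commit message, everything else is an assumption:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="codeparrot-finetuned",  # hypothetical
    save_strategy="steps",
    save_steps=500,     # matches "Training in progress, step 500"
    push_to_hub=True,   # produces in-progress commits like this one
)
```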
vocab.json CHANGED
The diff for this file is too large to render. See raw diff