isspek committed
Commit
d4a8a43
1 Parent(s): 0798c43

Upload 7 files

adapter_config.json CHANGED
@@ -9,14 +9,6 @@
   "modules_to_save": null,
   "peft_type": "LORA",
   "r": 16,
-  "id2label": {
-    "0": "unbiased",
-    "1": "biased"
-  },
-  "label2id": {
-    "unbiased": "0",
-    "biased": "1"
-  },
   "target_modules": [
     "query",
     "value"
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a37143d0f52fb44cfc82d35b6f918feec180697f8516ee12df0aa21080dd7af6
+oid sha256:4581a650652a2e6df6662b5b504b9f46aec202fe8537bacdc14ad57663f0af63
 size 4746701
merges.txt CHANGED
@@ -1,4 +1,4 @@
-#version: 0.2 - Trained by `huggingface/tokenizers`
+#version: 0.2
 Ġ t
 Ġ a
 h e
tokenizer.json CHANGED
@@ -96,6 +96,7 @@
   "continuing_subword_prefix": "",
   "end_of_word_suffix": "",
   "fuse_unk": false,
+  "byte_fallback": false,
   "vocab": {
     "<s>": 0,
     "<pad>": 1,
tokenizer_config.json CHANGED
@@ -1,14 +1,14 @@
 {
   "add_prefix_space": false,
   "bos_token": "<s>",
-  "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
   "eos_token": "</s>",
   "errors": "replace",
   "mask_token": "<mask>",
-  "model_max_length": 1000000000000000019884624838656,
+  "model_max_length": 512,
   "pad_token": "<pad>",
   "sep_token": "</s>",
+  "special_tokens_map_file": null,
   "tokenizer_class": "RobertaTokenizer",
   "trim_offsets": true,
   "unk_token": "<unk>"