lewtun (HF staff) committed on
Commit
bce3e6e
1 Parent(s): 0201e3d
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
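The `checkpoint-*/` pattern keeps the `Trainer`'s intermediate checkpoint directories (written periodically during training, e.g. `checkpoint-500/` under the default step-based save strategy) out of version control, so only the final weights and tokenizer files below are committed.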
README.md ADDED
@@ -0,0 +1,72 @@
+ ---
+ tags:
+ - generated_from_trainer
+ datasets:
+ - amazon_reviews_multi
+ metrics:
+ - accuracy
+ model_index:
+ - name: roberta-base-bne-finetuned-amazon_reviews_multi-finetuned-amazon_reviews_multi
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: amazon_reviews_multi
+       type: amazon_reviews_multi
+       args: es
+     metric:
+       name: Accuracy
+       type: accuracy
+       value: 0.9285
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # roberta-base-bne-finetuned-amazon_reviews_multi-finetuned-amazon_reviews_multi
+
+ This model is a fine-tuned version of [lewtun/roberta-base-bne-finetuned-amazon_reviews_multi](https://huggingface.co/lewtun/roberta-base-bne-finetuned-amazon_reviews_multi) on the amazon_reviews_multi dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.3595
+ - Accuracy: 0.9285
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 2
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|
+ | 0.103         | 1.0   | 1250 | 0.2864          | 0.928    |
+ | 0.0407        | 2.0   | 2500 | 0.3595          | 0.9285   |
+
+
+ ### Framework versions
+
+ - Transformers 4.9.2
+ - Pytorch 1.9.0+cu102
+ - Datasets 1.11.0
+ - Tokenizers 0.10.3
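The hyperparameters in the card map one-to-one onto `transformers.TrainingArguments` (the Adam betas/epsilon and linear scheduler listed are the library defaults). Below is a minimal sketch of how this fine-tuning run could be reproduced. The base checkpoint is taken from `_name_or_path` in `config.json` further down; the star-to-binary-label rule and the ~20,000-example training subset (implied by 1250 steps per epoch at batch size 16) are assumptions, not recorded in the repo:

```python
import numpy as np
from datasets import load_dataset, load_metric
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

# Base checkpoint recorded as _name_or_path in config.json below.
base_ckpt = "lewtun/roberta-base-bne-finetuned-amazon_reviews_multi"
tokenizer = AutoTokenizer.from_pretrained(base_ckpt)
model = AutoModelForSequenceClassification.from_pretrained(base_ckpt, num_labels=2)

# Spanish subset, matching `args: es` in the card metadata.
raw = load_dataset("amazon_reviews_multi", "es")

def to_binary(example):
    # Assumption: 1-2 stars -> NEGATIVO (0), 4-5 stars -> POSITIVO (1);
    # 3-star reviews are dropped. The exact rule is not recorded in the repo.
    return {"label": int(example["stars"] > 3)}

def tokenize(batch):
    return tokenizer(batch["review_body"], truncation=True)

ds = raw.filter(lambda x: x["stars"] != 3).map(to_binary).map(tokenize, batched=True)

# 1250 steps/epoch at batch size 16 implies ~20k training examples;
# how they were drawn is unknown, so this shuffle+select is an assumption.
train_ds = ds["train"].shuffle(seed=42).select(range(20_000))

accuracy = load_metric("accuracy")

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    return accuracy.compute(predictions=np.argmax(logits, axis=-1), references=labels)

args = TrainingArguments(
    output_dir="roberta-base-bne-finetuned-amazon_reviews_multi-finetuned-amazon_reviews_multi",
    learning_rate=2e-5,             # card: learning_rate 2e-05
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=2,
    seed=42,
    lr_scheduler_type="linear",
    evaluation_strategy="epoch",    # the results table reports metrics once per epoch
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=ds["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)
trainer.train()
```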
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "lewtun/roberta-base-bne-finetuned-amazon_reviews_multi",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NEGATIVO",
+     "1": "POSITIVO"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NEGATIVO": 0,
+     "POSITIVO": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.9.2",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50262
+ }
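Given the `id2label` mapping above, the checkpoint plugs straight into the `text-classification` pipeline. A small usage sketch (the repo id is inferred from the commit author plus the model name in the card; the review text is illustrative):

```python
from transformers import pipeline

# Repo id assumed from the commit author and the model name in the card above.
repo_id = "lewtun/roberta-base-bne-finetuned-amazon_reviews_multi-finetuned-amazon_reviews_multi"
classifier = pipeline("text-classification", model=repo_id)

print(classifier("Este producto es fantástico, lo recomiendo."))
# -> [{'label': 'POSITIVO', 'score': ...}] per the id2label mapping above
```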
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ca4f70c6220dc965be5bfe0ab41275fc267022391dae3966cd9eceb04d6684c
+ size 498664877
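These three lines are a Git LFS pointer, not the weights themselves: the actual 498,664,877-byte `pytorch_model.bin` is stored by its sha256 digest and resolved at download time. If you only need this one file, a sketch with `huggingface_hub` (same assumed repo id as above):

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and downloads the actual 498,664,877-byte weights file.
path = hf_hub_download(
    repo_id="lewtun/roberta-base-bne-finetuned-amazon_reviews_multi-finetuned-amazon_reviews_multi",
    filename="pytorch_model.bin",
)
print(path)
```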
runs/Aug22_18-42-34_13ee57411a2d/1629657761.5489256/events.out.tfevents.1629657761.13ee57411a2d.79.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eb597f3e30b18a9d0a36c2bff76d4fa804365e1097a728448eb587f6ebc2ed8f
+ size 4249
runs/Aug22_18-42-34_13ee57411a2d/events.out.tfevents.1629657761.13ee57411a2d.79.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:035494b900e2e98145b372c81aa8eb918c85cff616d55699c110c3eb7af692e3
+ size 5529
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "errors": "replace", "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "BSC-TeMU/roberta-base-bne", "tokenizer_class": "RobertaTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3175e7aeb9295893d6c80f14834ee4f12f91f315f0c2ad54cc6902cfce807d9d
+ size 2735
vocab.json ADDED
The diff for this file is too large to render. See raw diff