arjuntheprogrammer committed on
Commit
fc07afd
1 Parent(s): 4b29c85

End of training

.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - amazon_reviews_multi
+ metrics:
+ - accuracy
+ - f1
+ model-index:
+ - name: distilbert-base-multilingual-cased-sentiment-2
+   results:
+   - task:
+       name: Text Classification
+       type: text-classification
+     dataset:
+       name: amazon_reviews_multi
+       type: amazon_reviews_multi
+       args: en
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.7614
+     - name: F1
+       type: f1
+       value: 0.7614
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # distilbert-base-multilingual-cased-sentiment-2
+
+ This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the amazon_reviews_multi dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5882
+ - Accuracy: 0.7614
+ - F1: 0.7614
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.00024
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 33
+ - distributed_type: sagemaker_data_parallel
+ - num_devices: 8
+ - total_train_batch_size: 128
+ - total_eval_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_steps: 500
+ - num_epochs: 3
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.12.3
+ - Pytorch 1.9.1
+ - Datasets 1.15.1
+ - Tokenizers 0.10.3
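
The model card above does not show how to call the classifier, so here is a hedged usage sketch. The repo id `arjuntheprogrammer/distilbert-base-multilingual-cased-sentiment-2` is assumed from the commit author and model name, and the input sentence and printed score are illustrative only:

```python
from transformers import pipeline

# Assumed repo id; adjust to wherever this checkpoint is actually hosted.
model_id = "arjuntheprogrammer/distilbert-base-multilingual-cased-sentiment-2"

# The text-classification pipeline returns labels taken from the
# id2label mapping in config.json: "negative" / "neutral" / "positive".
classifier = pipeline("text-classification", model=model_id)

print(classifier("This product exceeded my expectations!"))
# Illustrative output: [{'label': 'positive', 'score': 0.97}]
```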
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "distilbert-base-multilingual-cased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "negative",
+     "1": "neutral",
+     "2": "positive"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "negative": "0",
+     "neutral": "1",
+     "positive": "2"
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.12.3",
+   "vocab_size": 119547
+ }
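
The `id2label` / `label2id` entries in this config are what turn the raw three-way logits into the sentiment strings. A minimal sketch of that mapping, assuming the same repo id as above (the German review text is illustrative; the base model is multilingual and cased):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "arjuntheprogrammer/distilbert-base-multilingual-cased-sentiment-2"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("Das Produkt ist in Ordnung, aber nicht großartig.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3): negative / neutral / positive

# config.json's id2label maps the argmax index back to a label string.
predicted_id = int(logits.argmax(dim=-1))
print(model.config.id2label[predicted_id])  # e.g. "neutral" (illustrative)
```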
logs/1643814415.8096128/events.out.tfevents.1643814415.algo-1.507.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3089f90a0e6180f50970a54a2e64111b7a0d15a064976dfb59869dfde7e14c67
+ size 4519
logs/events.out.tfevents.1643814415.algo-1.507.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73648544830ffe1b2dd019e61fec166e47d3dc66012edc22f47eb27f6475a34a
+ size 3590
logs/events.out.tfevents.1643814993.algo-1.507.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81c00ec6c00aed9f25c9336f4010f9938d620d2221d9f0418cbed721e150a295
+ size 409
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87fa8db2808dd1041989ea9afa088994c07b9a77f821a1fd44fc106d80d712f9
+ size 541347953
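
These three lines are not the weights themselves but a Git LFS pointer: the `oid` and `size` identify a roughly 541 MB blob stored in LFS. `from_pretrained` resolves this transparently, but the binary can also be fetched directly; a sketch using `huggingface_hub` under the same assumed repo id:

```python
from huggingface_hub import hf_hub_download

# Assumed repo id; this downloads the real weights file, not the LFS pointer text.
weights_path = hf_hub_download(
    repo_id="arjuntheprogrammer/distilbert-base-multilingual-cased-sentiment-2",
    filename="pytorch_model.bin",
)
print(weights_path)
```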
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"do_lower_case": false, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "distilbert-base-multilingual-cased", "tokenizer_class": "DistilBertTokenizer"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa716cbc5720080c7a586bd25957a4f9ab5cec339627943679e6ea1a0576f7fb
+ size 2863
vocab.txt ADDED
The diff for this file is too large to render. See raw diff