Norphel committed on
Commit
83dba29
1 Parent(s): ff009b8

End of training

Browse files
README.md CHANGED
@@ -1,6 +1,6 @@
1
  ---
2
  license: apache-2.0
3
- base_model: google-bert/bert-base-multilingual-cased
4
  tags:
5
  - generated_from_trainer
6
  model-index:
@@ -13,9 +13,9 @@ should probably proofread and complete it, then remove this comment. -->
13
 
14
  # dzoQA_ml
15
 
16
- This model is a fine-tuned version of [google-bert/bert-base-multilingual-cased](https://huggingface.co/google-bert/bert-base-multilingual-cased) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
- - Loss: 7.5072
19
 
20
  ## Model description
21
 
@@ -40,15 +40,21 @@ The following hyperparameters were used during training:
40
  - seed: 42
41
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
42
  - lr_scheduler_type: linear
43
- - num_epochs: 3
44
 
45
  ### Training results
46
 
47
  | Training Loss | Epoch | Step | Validation Loss |
48
  |:-------------:|:-----:|:----:|:---------------:|
49
- | No log | 1.0 | 17 | 7.1536 |
50
- | No log | 2.0 | 34 | 7.5442 |
51
- | No log | 3.0 | 51 | 7.5072 |
 
 
 
 
 
 
52
 
53
 
54
  ### Framework versions
 
1
  ---
2
  license: apache-2.0
3
+ base_model: distilbert/distilbert-base-uncased
4
  tags:
5
  - generated_from_trainer
6
  model-index:
 
13
 
14
  # dzoQA_ml
15
 
16
+ This model is a fine-tuned version of [distilbert/distilbert-base-uncased](https://huggingface.co/distilbert/distilbert-base-uncased) on an unknown dataset.
17
  It achieves the following results on the evaluation set:
18
+ - Loss: 5.2318
19
 
20
  ## Model description
21
 
 
40
  - seed: 42
41
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
42
  - lr_scheduler_type: linear
43
+ - num_epochs: 9
44
 
45
  ### Training results
46
 
47
  | Training Loss | Epoch | Step | Validation Loss |
48
  |:-------------:|:-----:|:----:|:---------------:|
49
+ | No log | 1.0 | 17 | 5.5030 |
50
+ | No log | 2.0 | 34 | 5.4155 |
51
+ | No log | 3.0 | 51 | 5.3362 |
52
+ | No log | 4.0 | 68 | 5.3240 |
53
+ | No log | 5.0 | 85 | 5.2659 |
54
+ | No log | 6.0 | 102 | 5.2410 |
55
+ | No log | 7.0 | 119 | 5.2223 |
56
+ | No log | 8.0 | 136 | 5.2406 |
57
+ | No log | 9.0 | 153 | 5.2318 |
58
 
59
 
60
  ### Framework versions
config.json CHANGED
@@ -1,31 +1,24 @@
1
  {
2
- "_name_or_path": "google-bert/bert-base-multilingual-cased",
 
3
  "architectures": [
4
- "BertForQuestionAnswering"
5
  ],
6
- "attention_probs_dropout_prob": 0.1,
7
- "classifier_dropout": null,
8
- "directionality": "bidi",
9
- "hidden_act": "gelu",
10
- "hidden_dropout_prob": 0.1,
11
- "hidden_size": 768,
12
  "initializer_range": 0.02,
13
- "intermediate_size": 3072,
14
- "layer_norm_eps": 1e-12,
15
  "max_position_embeddings": 512,
16
- "model_type": "bert",
17
- "num_attention_heads": 12,
18
- "num_hidden_layers": 12,
19
  "pad_token_id": 0,
20
- "pooler_fc_size": 768,
21
- "pooler_num_attention_heads": 12,
22
- "pooler_num_fc_layers": 3,
23
- "pooler_size_per_head": 128,
24
- "pooler_type": "first_token_transform",
25
- "position_embedding_type": "absolute",
26
  "torch_dtype": "float32",
27
  "transformers_version": "4.40.2",
28
- "type_vocab_size": 2,
29
- "use_cache": true,
30
- "vocab_size": 119547
31
  }
 
1
  {
2
+ "_name_or_path": "distilbert/distilbert-base-uncased",
3
+ "activation": "gelu",
4
  "architectures": [
5
+ "DistilBertForQuestionAnswering"
6
  ],
7
+ "attention_dropout": 0.1,
8
+ "dim": 768,
9
+ "dropout": 0.1,
10
+ "hidden_dim": 3072,
 
 
11
  "initializer_range": 0.02,
 
 
12
  "max_position_embeddings": 512,
13
+ "model_type": "distilbert",
14
+ "n_heads": 12,
15
+ "n_layers": 6,
16
  "pad_token_id": 0,
17
+ "qa_dropout": 0.1,
18
+ "seq_classif_dropout": 0.2,
19
+ "sinusoidal_pos_embds": false,
20
+ "tie_weights_": true,
 
 
21
  "torch_dtype": "float32",
22
  "transformers_version": "4.40.2",
23
+ "vocab_size": 30522
 
 
24
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:748206fd3b8529f4927b2c408cc8149a327a949d0504d8fa9ebb574faf1a4c35
3
- size 709080904
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a0cf06565271ac16eebe172709b89bba1f5b7f8a1dadb92f865d9b05a895525
3
+ size 265470032
runs/May12_11-56-27_01050a5ad463/events.out.tfevents.1715514991.01050a5ad463.3537.3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfa646ed9f3f296f7d128fd7ba8c22e66f61ec807aa7d4d4531fe177fab2f273
3
+ size 7297
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -43,13 +43,13 @@
43
  },
44
  "clean_up_tokenization_spaces": true,
45
  "cls_token": "[CLS]",
46
- "do_lower_case": false,
47
  "mask_token": "[MASK]",
48
  "model_max_length": 512,
49
  "pad_token": "[PAD]",
50
  "sep_token": "[SEP]",
51
  "strip_accents": null,
52
  "tokenize_chinese_chars": true,
53
- "tokenizer_class": "BertTokenizer",
54
  "unk_token": "[UNK]"
55
  }
 
43
  },
44
  "clean_up_tokenization_spaces": true,
45
  "cls_token": "[CLS]",
46
+ "do_lower_case": true,
47
  "mask_token": "[MASK]",
48
  "model_max_length": 512,
49
  "pad_token": "[PAD]",
50
  "sep_token": "[SEP]",
51
  "strip_accents": null,
52
  "tokenize_chinese_chars": true,
53
+ "tokenizer_class": "DistilBertTokenizer",
54
  "unk_token": "[UNK]"
55
  }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bf6b6eedf6d775923837c3e3479be2e2067d244544e87b41241343c0a307ad58
3
  size 4984
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:187097fc6101f836fcc5dd3c5eb82cd40da080eec328859f1b4c17afa3cfcea4
3
  size 4984
vocab.txt CHANGED
The diff for this file is too large to render. See raw diff