Update README.md
README.md CHANGED
@@ -3,33 +3,30 @@ license: apache-2.0
 base_model: distilroberta-base
 tags:
 - generated_from_trainer
-
-
-
+inference: false
+datasets:
+- HuggingFaceFW/fineweb-edu-llama3-annotations
+language:
+- en
+metrics:
+- mse
 ---
 
-<!-- This model card has been generated automatically according to the information the Trainer had access to. You
-should probably proofread and complete it, then remove this comment. -->
 
 [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/pszemraj/eduscore-regression/runs/8e2uvp5t)
 
-# distilroberta-base-
+# distilroberta-base-edu-classifier
 
 This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the HuggingFaceFW/fineweb-edu-llama3-annotations dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.2194
 - Mse: 0.2194
 
-## Model description
-
-More information needed
+## Usage
 
-## Intended uses & limitations
-
-More information needed
+Same as the others, will add later
 
-## Training and evaluation data
-
-More information needed
 
 ## Training procedure
 
@@ -46,50 +43,3 @@ The following hyperparameters were used during training:
 - lr_scheduler_type: linear
 - lr_scheduler_warmup_ratio: 0.05
 - num_epochs: 1.0
-
-### Training results
-
-| Training Loss | Epoch  | Step | Validation Loss | Mse    |
-|:-------------:|:------:|:----:|:---------------:|:------:|
-| 0.5276        | 0.0288 | 100  | 0.5012          | 0.5012 |
-| 0.3307        | 0.0576 | 200  | 0.3467          | 0.3467 |
-| 0.2994        | 0.0865 | 300  | 0.2948          | 0.2948 |
-| 0.2813        | 0.1153 | 400  | 0.2799          | 0.2799 |
-| 0.2707        | 0.1441 | 500  | 0.3017          | 0.3017 |
-| 0.2506        | 0.1729 | 600  | 0.2699          | 0.2699 |
-| 0.2584        | 0.2018 | 700  | 0.2633          | 0.2633 |
-| 0.2603        | 0.2306 | 800  | 0.2434          | 0.2434 |
-| 0.2973        | 0.2594 | 900  | 0.2394          | 0.2394 |
-| 0.2541        | 0.2882 | 1000 | 0.2356          | 0.2356 |
-| 0.2837        | 0.3171 | 1100 | 0.2437          | 0.2437 |
-| 0.242         | 0.3459 | 1200 | 0.2379          | 0.2379 |
-| 0.2379        | 0.3747 | 1300 | 0.2270          | 0.2270 |
-| 0.23          | 0.4035 | 1400 | 0.2357          | 0.2357 |
-| 0.2345        | 0.4324 | 1500 | 0.2417          | 0.2417 |
-| 0.2574        | 0.4612 | 1600 | 0.2556          | 0.2556 |
-| 0.264         | 0.4900 | 1700 | 0.2452          | 0.2452 |
-| 0.2596        | 0.5188 | 1800 | 0.2215          | 0.2215 |
-| 0.244         | 0.5477 | 1900 | 0.2269          | 0.2269 |
-| 0.2225        | 0.5765 | 2000 | 0.2342          | 0.2342 |
-| 0.2475        | 0.6053 | 2100 | 0.2403          | 0.2403 |
-| 0.253         | 0.6341 | 2200 | 0.2326          | 0.2326 |
-| 0.2435        | 0.6630 | 2300 | 0.2161          | 0.2161 |
-| 0.2865        | 0.6918 | 2400 | 0.2265          | 0.2265 |
-| 0.2351        | 0.7206 | 2500 | 0.2343          | 0.2343 |
-| 0.2582        | 0.7494 | 2600 | 0.2342          | 0.2342 |
-| 0.2167        | 0.7783 | 2700 | 0.2337          | 0.2337 |
-| 0.2495        | 0.8071 | 2800 | 0.2273          | 0.2273 |
-| 0.2364        | 0.8359 | 2900 | 0.2298          | 0.2298 |
-| 0.2236        | 0.8647 | 3000 | 0.2170          | 0.2170 |
-| 0.231         | 0.8936 | 3100 | 0.2234          | 0.2234 |
-| 0.2474        | 0.9224 | 3200 | 0.2227          | 0.2227 |
-| 0.2333        | 0.9512 | 3300 | 0.2241          | 0.2241 |
-| 0.2265        | 0.9800 | 3400 | 0.2197          | 0.2197 |
-
-
-### Framework versions
-
-- Transformers 4.42.3
-- Pytorch 2.3.1+cu121
-- Datasets 2.20.0
-- Tokenizers 0.19.1
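
The new Usage section is still a stub ("Same as the others, will add later"), so below is a minimal sketch of how a checkpoint like this is typically called, not the card's official recipe. It assumes the model is published with a single-logit regression head (`num_labels=1`); the repo id is hypothetical, so substitute the actual model id.

```python
# Minimal usage sketch. The repo id and the 0-5 score range are assumptions
# (FineWeb-Edu annotations use 0-5 scores), not taken from this card.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "pszemraj/distilroberta-base-edu-classifier"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

text = "Photosynthesis converts light energy into chemical energy in plants."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1): one regression logit
score = logits.squeeze().item()
print(f"predicted educational score: {score:.2f}")
```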
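A note on the metrics block: Loss and Mse are identical (0.2194) by construction, not coincidence. With `num_labels=1`, `transformers` sets `problem_type="regression"` and optimizes `MSELoss`, so the value the `Trainer` reports as eval loss is exactly the mean-squared error:

```python
# The eval loss of a num_labels=1 sequence-classification model is plain MSE.
# Toy numbers for illustration; they are not from the card.
import torch

preds = torch.tensor([2.7, 1.1, 3.9])   # single-logit model outputs
labels = torch.tensor([3.0, 1.0, 4.0])  # reference scores
print(torch.nn.functional.mse_loss(preds, labels).item())  # ~0.0367
```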
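For reproduction, the hyperparameters that survive in the second hunk (linear schedule, 5% warmup, one epoch) map onto `TrainingArguments` as sketched below. Anything not shown in this diff, learning rate and batch size in particular, is a placeholder, and the 100-step eval cadence is inferred from the deleted results table.

```python
# Sketch only: values marked "placeholder" are NOT from the diff.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="distilroberta-base-edu-classifier",  # hypothetical name
    num_train_epochs=1.0,            # from the diff
    lr_scheduler_type="linear",      # from the diff
    warmup_ratio=0.05,               # from the diff
    learning_rate=5e-5,              # placeholder
    per_device_train_batch_size=16,  # placeholder
    eval_strategy="steps",
    eval_steps=100,                  # eval rows appear every 100 steps
    logging_steps=100,
    report_to="wandb",               # the card links a W&B run
)
```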