New model from https://wandb.ai/wandb/huggingtweets/runs/39gb3xm1
Files changed:
- README.md +8 -8
- config.json +4 -2
- pytorch_model.bin +2 -2
- special_tokens_map.json +5 -1
- tokenizer.json +0 -0
- tokenizer_config.json +10 -1
- training_args.bin +2 -2
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 language: en
-thumbnail: https://
+thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true
 tags:
 - huggingtweets
 widget:
@@ -42,20 +42,20 @@ The model was trained on tweets from Netflix.
 
 | Data | Netflix |
 | --- | --- |
-| Tweets downloaded |
-| Retweets |
-| Short tweets |
-| Tweets kept |
+| Tweets downloaded | 3216 |
+| Retweets | 1549 |
+| Short tweets | 130 |
+| Tweets kept | 1537 |
 
-[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/
+[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1p08449h/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @netflix's tweets.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/39gb3xm1) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/
+At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/39gb3xm1/artifacts) is logged and versioned.
 
 ## How to use
 
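The README's "## How to use" section is cut off by the hunk above. As a minimal sketch (not taken from this diff), a huggingtweets-style model is typically loaded through the transformers text-generation pipeline; the repo id `huggingtweets/netflix` below is an assumption:

```python
# Minimal usage sketch; the repo id "huggingtweets/netflix" is assumed, not read from this diff.
from transformers import pipeline

generator = pipeline("text-generation", model="huggingtweets/netflix")

# Generate a few tweet-style continuations from a short prompt.
outputs = generator("My dream is", num_return_sequences=3, max_length=60)
for out in outputs:
    print(out["generated_text"])
```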
config.json
CHANGED
@@ -8,7 +8,6 @@
   "bos_token_id": 50256,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
-  "gradient_checkpointing": false,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "model_type": "gpt2",
@@ -18,7 +17,9 @@
   "n_inner": null,
   "n_layer": 12,
   "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
@@ -35,7 +36,8 @@
       "top_p": 0.95
     }
   },
-  "
+  "torch_dtype": "float32",
+  "transformers_version": "4.24.0",
   "use_cache": true,
   "vocab_size": 50257
 }
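The added keys (`reorder_and_upcast_attn`, `scale_attn_by_inverse_layer_idx`, `torch_dtype`, `transformers_version`) are standard GPT-2 config fields written by recent transformers releases. A small sketch for inspecting them once the updated repo is available; the repo id is an assumption:

```python
# Sketch: inspect the updated config; "huggingtweets/netflix" is an assumed repo id.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("huggingtweets/netflix")

# Expect the values shown in the diff above.
print(config.reorder_and_upcast_attn, config.scale_attn_by_inverse_layer_idx)
print(config.torch_dtype, config.transformers_version)
```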
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d52336b414a5051f7a8de42b8df3164c449065db3964b8f77c33e3b91c7d0ab8
+size 510396521
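This file is a Git LFS pointer: the repo stores only the object's SHA-256 and byte size, while the actual weights live in LFS storage. A hedged sketch for checking a downloaded `pytorch_model.bin` against the pointer above (the local file path is an assumption):

```python
# Sketch: verify a downloaded LFS object against the pointer's oid and size.
# The path "pytorch_model.bin" is assumed to be the locally downloaded file.
import hashlib
import os

EXPECTED_OID = "d52336b414a5051f7a8de42b8df3164c449065db3964b8f77c33e3b91c7d0ab8"
EXPECTED_SIZE = 510396521

path = "pytorch_model.bin"
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert digest.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```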
special_tokens_map.json
CHANGED
@@ -1 +1,5 @@
-{
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json
CHANGED
The diff for this file is too large to render.
tokenizer_config.json
CHANGED
@@ -1 +1,10 @@
-{
+{
+  "add_prefix_space": false,
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "name_or_path": "gpt2",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
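Both `special_tokens_map.json` and `tokenizer_config.json` now spell out the standard GPT-2 special-token setup, where `<|endoftext|>` serves as BOS, EOS, and UNK. A minimal sketch for confirming the shipped tokenizer picks these up (repo id assumed, as above):

```python
# Sketch: load the tokenizer and confirm the special-token setup from this commit.
# "huggingtweets/netflix" is an assumed repo id, not taken from the diff.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("huggingtweets/netflix")

print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # all "<|endoftext|>"
print(tokenizer.model_max_length)  # 1024
```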
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f2e57e6cd4b4b4a6a31e313adcae0ee4f54760be86858568ba84cf2e9ba72365
+size 3375
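`training_args.bin` is the pickled `TrainingArguments` object saved by the Trainer; as with the weights, only the LFS pointer is updated here. A hedged sketch for inspecting the hyperparameters it records, assuming the file has been downloaded locally:

```python
# Sketch: load the saved TrainingArguments; requires transformers to be installed
# so the pickled class can be resolved. The local path is an assumption.
# (On recent PyTorch you may need to pass weights_only=False to torch.load.)
import torch

training_args = torch.load("training_args.bin")
print(training_args.learning_rate)
print(training_args.num_train_epochs)
print(training_args.per_device_train_batch_size)
```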