New model from https://wandb.ai/wandb/huggingtweets/runs/y9vq5ay2
Changed files:
- README.md (+9 -9)
- config.json (+4 -2)
- flax_model.msgpack (+0 -3)
- pytorch_model.bin (+2 -2)
- tokenizer.json (+0 -0)
- tokenizer_config.json (+1 -1)
- training_args.bin (+2 -2)
README.md
CHANGED

@@ -1,6 +1,6 @@
 ---
 language: en
-thumbnail: https://
+thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true
 tags:
 - huggingtweets
 widget:
@@ -10,7 +10,7 @@ widget:
 <div class="inline-flex flex-col" style="line-height: 1.5;">
 <div class="flex">
 <div
-style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/
+style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('https://pbs.twimg.com/profile_images/1519346609125003264/rekKHZUq_400x400.jpg')">
 </div>
 <div
 style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url('')">
@@ -42,20 +42,20 @@ The model was trained on tweets from Lady Gaga.
 
 | Data | Lady Gaga |
 | --- | --- |
-| Tweets downloaded |
-| Retweets |
-| Short tweets |
-| Tweets kept |
+| Tweets downloaded | 3178 |
+| Retweets | 617 |
+| Short tweets | 329 |
+| Tweets kept | 2232 |
 
-[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/
+[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/2otxrql6/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @ladygaga's tweets.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/y9vq5ay2) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/
+At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/y9vq5ay2/artifacts) is logged and versioned.
 
 ## How to use
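The diff view cuts the README off at the "## How to use" heading. For context, here is a minimal sketch of how a huggingtweets model card typically demonstrates usage via the transformers pipeline API; the repo id "huggingtweets/ladygaga" is an assumption inferred from the card's @ladygaga handle, not confirmed by this diff:

```python
from transformers import pipeline

# Load the fine-tuned GPT-2 as a text-generation pipeline.
# Repo id "huggingtweets/ladygaga" is assumed from the @ladygaga handle.
generator = pipeline("text-generation", model="huggingtweets/ladygaga")

# Sample a few tweet-style continuations from a prompt.
for out in generator("My dream is", num_return_sequences=5, do_sample=True):
    print(out["generated_text"])
```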
config.json
CHANGED

@@ -8,7 +8,6 @@
 "bos_token_id": 50256,
 "embd_pdrop": 0.1,
 "eos_token_id": 50256,
-"gradient_checkpointing": false,
 "initializer_range": 0.02,
 "layer_norm_epsilon": 1e-05,
 "model_type": "gpt2",
@@ -18,7 +17,9 @@
 "n_inner": null,
 "n_layer": 12,
 "n_positions": 1024,
+"reorder_and_upcast_attn": false,
 "resid_pdrop": 0.1,
+"scale_attn_by_inverse_layer_idx": false,
 "scale_attn_weights": true,
 "summary_activation": null,
 "summary_first_dropout": 0.1,
@@ -35,7 +36,8 @@
 "top_p": 0.95
 }
 },
-"
+"torch_dtype": "float32",
+"transformers_version": "4.18.0",
 "use_cache": true,
 "vocab_size": 50257
 }
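The config change drops the deprecated gradient_checkpointing key and adds the GPT-2 attention-stability options (scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn) along with the torch_dtype and transformers_version fields that newer transformers releases record by default. A hedged sketch of inspecting the new fields after download, using the same assumed repo id as above:

```python
from transformers import AutoConfig

# Fetch the updated config.json from the Hub (same assumed repo id).
config = AutoConfig.from_pretrained("huggingtweets/ladygaga")

# The fields added in this commit surface as plain attributes.
print(config.transformers_version)             # "4.18.0"
print(config.torch_dtype)                      # torch.float32
print(config.scale_attn_by_inverse_layer_idx)  # False
print(config.reorder_and_upcast_attn)          # False
```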
flax_model.msgpack
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ea08d430d1dc6f90bbdffbd1f1763927badcf6d899ee2d3ef14e7de7aa3f335d
-size 497764120
pytorch_model.bin
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f7c70ad3178c7e5e3cd956e8423a8021598099bde881452cc66df0e0f2040c2a
+size 510396521
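The weights live in Git LFS, so the diff only touches the three-line pointer file: spec version, sha256 oid, and byte size (the old oid and size are truncated in this view). A downloaded copy can be verified against the new pointer with plain Python; a minimal sketch, assuming the file has been fetched into the working directory:

```python
import hashlib

# Hash the downloaded weights in 1 MiB chunks so large files fit in memory.
digest = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Compare against the sha256 oid recorded in the LFS pointer above.
expected = "f7c70ad3178c7e5e3cd956e8423a8021598099bde881452cc66df0e0f2040c2a"
assert digest.hexdigest() == expected, "checksum mismatch"
```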
tokenizer.json
CHANGED

The diff for this file is too large to render; see the raw diff.
tokenizer_config.json
CHANGED

@@ -1 +1 @@
-{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2"}
+{"unk_token": "<|endoftext|>", "bos_token": "<|endoftext|>", "eos_token": "<|endoftext|>", "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "gpt2", "tokenizer_class": "GPT2Tokenizer"}
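The only change here is the explicit "tokenizer_class" field, which newer transformers versions write so that AutoTokenizer can resolve the class from the tokenizer config alone. A small sketch, with the same assumed repo id:

```python
from transformers import AutoTokenizer

# "tokenizer_class": "GPT2Tokenizer" lets AutoTokenizer pick the class
# without consulting the model config; the fast variant is returned when
# the tokenizers library is available.
tokenizer = AutoTokenizer.from_pretrained("huggingtweets/ladygaga")
print(type(tokenizer).__name__)  # GPT2TokenizerFast (or GPT2Tokenizer)
```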
training_args.bin
CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:77496e29fbe641c8e7c32df8bae2b9c7c224c563a03507720df064d90b367dbd
+size 3055
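training_args.bin is the TrainingArguments object that the transformers Trainer pickles via torch.save next to the model; the ~3 kB LFS pointer above tracks it. A hedged sketch of reloading it to inspect the recorded hyperparameters, assuming a local copy and a transformers install compatible with the pickle:

```python
import torch

# Trainer saves its TrainingArguments with torch.save; unpickle them here.
# Newer torch versions may require weights_only=False to allow the pickle.
args = torch.load("training_args.bin", weights_only=False)

# A few of the recorded hyperparameters (attribute names from TrainingArguments).
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```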