boris committed
Commit d8fee05
1 Parent(s): 15a7b45

New model from https://wandb.ai/wandb/huggingtweets/runs/10czim3i

Files changed (5)
  1. README.md +8 -8
  2. config.json +3 -2
  3. pytorch_model.bin +2 -2
  4. tokenizer.json +0 -0
  5. training_args.bin +2 -2
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 language: en
-thumbnail: https://www.huggingtweets.com/foxnews/1629574796027/predictions.png
+thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true
 tags:
 - huggingtweets
 widget:
@@ -10,7 +10,7 @@ widget:
 <div class="inline-flex flex-col" style="line-height: 1.5;">
 <div class="flex">
 <div
-style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/918480715158716419/4X8oCbge_400x400.jpg&#39;)">
+style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1459143267673677853/xtIvtfZp_400x400.jpg&#39;)">
 </div>
 <div
 style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)">
@@ -43,19 +43,19 @@ The model was trained on tweets from Fox News.
 | Data | Fox News |
 | --- | --- |
 | Tweets downloaded | 3250 |
-| Retweets | 17 |
-| Short tweets | 5 |
-| Tweets kept | 3228 |
+| Retweets | 84 |
+| Short tweets | 0 |
+| Tweets kept | 3166 |
 
-[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/1vjlmqz6/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3gz4o7tf/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @foxnews's tweets.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2dipn7wd) for full transparency and reproducibility.
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/10czim3i) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2dipn7wd/artifacts) is logged and versioned.
+At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/10czim3i/artifacts) is logged and versioned.
 
 ## How to use
 
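The README's "## How to use" section is truncated in this diff. For context, a minimal usage sketch, assuming the card is published under the `huggingtweets/foxnews` repo id (as the @foxnews handle above suggests):

```python
from transformers import pipeline

# Load the fine-tuned GPT-2 checkpoint as a text-generation pipeline
# (repo id assumed; adjust if the model is hosted elsewhere).
generator = pipeline("text-generation", model="huggingtweets/foxnews")

# Sample a few completions from a prompt.
for out in generator("My dream is", num_return_sequences=3, do_sample=True):
    print(out["generated_text"])
```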
config.json CHANGED
@@ -8,7 +8,6 @@
   "bos_token_id": 50256,
   "embd_pdrop": 0.1,
   "eos_token_id": 50256,
-  "gradient_checkpointing": false,
   "initializer_range": 0.02,
   "layer_norm_epsilon": 1e-05,
   "model_type": "gpt2",
@@ -18,7+17,9 @@
   "n_inner": null,
   "n_layer": 12,
   "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
   "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
   "scale_attn_weights": true,
   "summary_activation": null,
   "summary_first_dropout": 0.1,
@@ -36,7 +37,7 @@
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.9.2",
+  "transformers_version": "4.17.0",
   "use_cache": true,
   "vocab_size": 50257
 }
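The config diff drops the `gradient_checkpointing` key (no longer stored in the config by newer transformers releases), adds the `reorder_and_upcast_attn` and `scale_attn_by_inverse_layer_idx` flags, and records `transformers_version` 4.17.0. A quick sketch for confirming these fields after download, with the repo id again assumed:

```python
from transformers import AutoConfig

# Pin the revision to this commit so the fields match the diff above.
config = AutoConfig.from_pretrained("huggingtweets/foxnews", revision="d8fee05")

print(config.model_type)                       # "gpt2"
print(config.reorder_and_upcast_attn)          # False
print(config.scale_attn_by_inverse_layer_idx)  # False
```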
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bbb8126134b7720a139c40b6643d3159d9e8ba93d5f99e41f2969b090fb9f4b7
-size 510403817
+oid sha256:23946788830b63f55a5fbf715fbb112d89753f1237437b91bfd27eb198ac5c08
+size 510404393
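The weights are tracked with Git LFS, so only the pointer file changes here: the new binary has the sha256 oid shown above and is roughly 510 MB. A sketch for checking a downloaded copy against that oid, assuming the `huggingface_hub` client and the same repo id:

```python
import hashlib
from huggingface_hub import hf_hub_download

# Fetch the weights at this commit (repo id assumed) and hash them in chunks.
path = hf_hub_download("huggingtweets/foxnews", "pytorch_model.bin", revision="d8fee05")
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

# Should equal the oid in the new LFS pointer.
print(digest.hexdigest() == "23946788830b63f55a5fbf715fbb112d89753f1237437b91bfd27eb198ac5c08")
```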
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5de0f0be497e35ee49b93e7afdeb99735e6bf5e80078fe385e44977457d7aad4
-size 2671
+oid sha256:2f0c24c386d2621b2f3d7dbc7ca70a5e0d7b959b83004e10e9e282f92e0dae0c
+size 2991
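training_args.bin is typically the pickled `TrainingArguments` object that the transformers Trainer saves next to the model, also stored via LFS. If reproducing the run, it can be inspected roughly like this (a sketch: unpickling needs a compatible transformers install, and pickled files should only be loaded from trusted sources):

```python
import torch

# Unpickle the saved TrainingArguments (weights_only=False is needed on recent
# PyTorch because this is an arbitrary pickled object, not a plain tensor file).
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```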