spuun committed
Commit 8a38084
1 Parent(s): b9da828

Add model.

README.md CHANGED
@@ -1,3 +1,31 @@
 ---
+language:
+- en
+metrics:
+- accuracy
+co2_eq_emissions:
+  emissions: "10"
+  source: "mlco2.github.io"
+  training_type: "fine-tuning"
+  geographical_location: "West Java, Indonesia"
+  hardware_used: "1 T4"
 license: cc-by-nc-sa-4.0
+widget:
+- text: 'You: "Hey kekbot! What\'s up?"\nKekbot: "'
+  example_title: "Asking what's up"
+- text: 'You: "Hey kekbot! How r u?"\nKekbot: "'
+  example_title: "Asking how he is"
 ---
+> THIS MODEL IS INTENDED FOR RESEARCH PURPOSES ONLY
+# Kekbot Mini
+
+Based on a `distilgpt2` model, fine-tuned on a select subset (65k+ messages) of Art Union's general-chat channel history.
+
+### Limits and biases
+As the model is trained on chat history, it is possible for discriminatory or even offensive material to be output.
+The author maintains that ML models are merely statistical representations of the datasets used to train them,
+and that, due to the nature of this dataset, it is practically impossible to be certain of
+the degree of "cleanliness" of the data it contains.
+
+The author can confirm, however, that in heuristic testing the model was not found to be offensive
+to the author himself; hopefully this holds true for everyone in the audience.
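
For readers who want to try the model, here is a minimal usage sketch based on the widget prompts above. The repo id `spuun/kekbot-mini` is an assumption inferred from the author and model names, and the generation settings mirror `task_specific_params` in config.json:

```python
# Minimal usage sketch. The repo id "spuun/kekbot-mini" is a guess from the
# author/model names; substitute the actual Hub id. Generation settings
# mirror task_specific_params in config.json (do_sample=True, max_length=50).
from transformers import pipeline

generator = pipeline("text-generation", model="spuun/kekbot-mini")

# The widget examples use a You/Kekbot turn format, with Kekbot's reply
# opened by a double quote; the model continues from there.
prompt = 'You: "Hey kekbot! What\'s up?"\nKekbot: "'
result = generator(prompt, do_sample=True, max_length=50)
print(result[0]["generated_text"])
```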
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 6.0,
+    "eval_accuracy": 0.5450006982265047,
+    "eval_loss": 2.7938177585601807,
+    "eval_runtime": 1.1695,
+    "eval_samples": 28,
+    "eval_samples_per_second": 23.942,
+    "eval_steps_per_second": 3.42,
+    "perplexity": 16.34329560585662,
+    "train_loss": 2.866144816080729,
+    "train_runtime": 358.7849,
+    "train_samples": 527,
+    "train_samples_per_second": 8.813,
+    "train_steps_per_second": 2.207
+}
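
One consistency note on the metrics above: the reported `perplexity` is simply `exp(eval_loss)`, which a one-liner confirms:

```python
# perplexity = exp(eval_loss), reproducing the value reported above
import math

print(math.exp(2.7938177585601807))  # 16.34329560585662
```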
config.json ADDED
@@ -0,0 +1,46 @@
+{
+  "_name_or_path": "distilgpt2",
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.20.0.dev0",
+  "use_cache": true,
+  "vocab_size": 50257
+}
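
As a back-of-the-envelope check, the config above implies roughly 82M parameters, which squares with the ~334 MB float32 checkpoint added below (`pytorch_model.bin`); this sketch just tallies the standard GPT-2 weight shapes:

```python
# Parameter tally for the config above (GPT-2 layout, lm_head tied to wte).
n_vocab, n_pos, n_embd, n_layer = 50257, 1024, 768, 6

embed = n_vocab * n_embd + n_pos * n_embd  # token + position embeddings
attn = n_embd * 3 * n_embd + 3 * n_embd    # c_attn weight + bias
attn += n_embd * n_embd + n_embd           # attention c_proj
mlp = n_embd * 4 * n_embd + 4 * n_embd     # c_fc
mlp += 4 * n_embd * n_embd + n_embd        # MLP c_proj
norms = 2 * (2 * n_embd)                   # ln_1 + ln_2 (weight and bias each)
total = embed + n_layer * (attn + mlp + norms) + 2 * n_embd  # + final ln_f

print(total)            # 81912576 parameters
print(total * 4 / 1e6)  # ~327.7 MB of float32 weights; the rest of the
                        # 333,969,117-byte file is mask buffers and pickle overhead
```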
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad2f62432c7ddfba09749700ec285391d4197d4d1b9dbdcfd381548a31ecc112
+size 333969117
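
Since the weights ship as a Git LFS pointer, the `oid` and `size` fields double as integrity metadata; a small sketch (hypothetical local path) verifies a downloaded copy against them:

```python
# Verify a downloaded pytorch_model.bin against the LFS pointer above:
# "oid" is the file's SHA-256 digest, "size" its exact byte length.
import hashlib
import os

path = "pytorch_model.bin"  # hypothetical local path; adjust to your download
expected = "ad2f62432c7ddfba09749700ec285391d4197d4d1b9dbdcfd381548a31ecc112"

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == 333969117, "size mismatch"
assert digest.hexdigest() == expected, "checksum mismatch"
print("pytorch_model.bin matches the LFS pointer")
```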
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "add_prefix_space": false,
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "name_or_path": "distilgpt2",
+  "special_tokens_map_file": null,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
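
The tokenizer files follow stock GPT-2: a single `<|endoftext|>` token serves as BOS, EOS, and UNK, matching `bos_token_id`/`eos_token_id` of 50256 in config.json. A quick check against the upstream `distilgpt2` tokenizer it inherits:

```python
# GPT-2 reuses one special token for BOS/EOS/UNK; id 50256 matches config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("distilgpt2")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <|endoftext|> for all three
print(tok.eos_token_id)                             # 50256
```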
trainer_state.json ADDED
@@ -0,0 +1,31 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 6.0,
+  "global_step": 792,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 3.79,
+      "learning_rate": 1.8434343434343433e-05,
+      "loss": 2.936,
+      "step": 500
+    },
+    {
+      "epoch": 6.0,
+      "step": 792,
+      "total_flos": 826220325371904.0,
+      "train_loss": 2.866144816080729,
+      "train_runtime": 358.7849,
+      "train_samples_per_second": 8.813,
+      "train_steps_per_second": 2.207
+    }
+  ],
+  "max_steps": 792,
+  "num_train_epochs": 6,
+  "total_flos": 826220325371904.0,
+  "trial_name": null,
+  "trial_params": null
+}
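
The lone learning-rate entry in `log_history` is consistent with the HF `Trainer`'s default linear decay from 5e-5 with no warmup; that default is an assumption (training_args.bin is opaque binary), but the arithmetic matches to the last digit, and the step count likewise implies a batch size of 4:

```python
# Assumption: Trainer defaults (linear decay from 5e-5, no warmup), inferred
# because training_args.bin is binary; the logged values line up exactly.
initial_lr, max_steps, step = 5e-5, 792, 500
print(initial_lr * (max_steps - step) / max_steps)  # 1.8434343434343433e-05

# 527 train samples over 6 epochs in 792 steps implies batch size 4:
print(-(-527 // 4) * 6)  # ceil(527/4) = 132 steps/epoch * 6 epochs = 792
```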
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b4a45d2b29115fe42958b26f36eca2a446ea4d40b3afa14281303baeaeecc19
+size 3247
vocab.json ADDED
The diff for this file is too large to render. See raw diff