adldtd committed
Commit ebe8d8e · 1 Parent(s): 16088c9

Uploaded project

Files changed (6)
  1. authors_all_CUT.json +0 -0
  2. config.json +46 -0
  3. generation.py +12 -0
  4. generator1.py +31 -0
  5. pytorch_model.bin +3 -0
  6. training_args.bin +3 -0
authors_all_CUT.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "_name_or_path": "distilgpt2",
+   "_num_labels": 1,
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "id2label": {
+     "0": "LABEL_0"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "LABEL_0": 0
+   },
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 6,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "use_cache": true,
+   "vocab_size": 50257
+ }
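For orientation, this config records a standard DistilGPT2 topology: 6 transformer blocks (half of GPT-2's 12), 12 attention heads, 768-dimensional embeddings, a 1024-token context window, and a 50257-token vocabulary. A minimal sketch of inspecting it with transformers' AutoConfig; the "." path is an assumption that the script runs from the repository root:

from transformers import AutoConfig

# Load the committed config.json from the current directory (assumed repo root)
config = AutoConfig.from_pretrained(".")
print(config.model_type)  # "gpt2"
print(config.n_layer)     # 6 transformer blocks
print(config.n_embd)      # 768-dimensional hidden states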
generation.py ADDED
@@ -0,0 +1,12 @@
+ from transformers import AutoTokenizer
+ from transformers import AutoModelWithLMHead  # deprecated alias for AutoModelForCausalLM
+ from transformers import GPT2LMHeadModel
+
+ FOLDER_NAME = "./distilgpt2_quotes.TRANS"  # local folder holding the fine-tuned weights
+ model: GPT2LMHeadModel = AutoModelWithLMHead.from_pretrained(FOLDER_NAME)
+ tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+
+ text = "Plato: "  # prompt with an author name to condition the sample
+ ids = tokenizer(text, return_tensors="pt")
+ out = model.generate(**ids, early_stopping=True, max_length=90, do_sample=True)  # early_stopping only affects beam search
+ print(tokenizer.decode(out[0]))
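Two notes on the script above: AutoModelWithLMHead is a deprecated alias, and early_stopping has no effect under pure sampling since it only applies to beam search. A hedged sketch of the same generation step with the current AutoModelForCausalLM class; the top_k/top_p values and the explicit pad_token_id are added assumptions, not settings from the original script:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Same generation step, using the non-deprecated Auto class; the folder
# name matches the training output directory used in generator1.py
model = AutoModelForCausalLM.from_pretrained("./distilgpt2_quotes.TRANS")
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")

ids = tokenizer("Plato: ", return_tensors="pt")
out = model.generate(
    **ids,
    max_length=90,
    do_sample=True,
    top_k=50,                               # assumed sampling values, not from the original
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,    # silences the missing-pad-token warning
)
print(tokenizer.decode(out[0], skip_special_tokens=True))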
generator1.py ADDED
@@ -0,0 +1,31 @@
+ from transformers import AutoTokenizer
+ from transformers import AutoModelWithLMHead  # deprecated alias for AutoModelForCausalLM
+ from transformers import Trainer, TrainingArguments
+ from transformers import DataCollatorForLanguageModeling
+
+ from datasets import load_dataset
+
+
+ data = load_dataset("json", data_files="./authors_all_CUT.json")
+ data = data["train"].train_test_split(test_size=0.10)  # hold out 10% for evaluation
+
+ tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+ tokenizer.pad_token = tokenizer.eos_token  # quick fix: DistilGPT2's tokenizer defines no padding token, so reuse EOS
+ def tokenize_datasets(data_set):
+     return tokenizer(data_set["text"], padding=False, truncation=True)
+
+ BATCH_SIZE = 8
+ data = data.map(tokenize_datasets, batched=True, batch_size=BATCH_SIZE)  # tokenize all text in batches
+
+
+ FOLDER_NAME = "./distilgpt2_quotes.TRANS"
+ model = AutoModelWithLMHead.from_pretrained("distilgpt2")
+ #model = AutoModelWithLMHead.from_pretrained(FOLDER_NAME)  # uncomment to resume from a previous run
+ collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)  # mlm=False builds causal-LM labels from the inputs
+
+ EPOCHS = 5
+ training_args = TrainingArguments(FOLDER_NAME, overwrite_output_dir=True, num_train_epochs=EPOCHS, per_device_train_batch_size=BATCH_SIZE, per_device_eval_batch_size=BATCH_SIZE, eval_steps=400, save_steps=800)
+ trainer = Trainer(model, args=training_args, data_collator=collator, train_dataset=data["train"], eval_dataset=data["test"])
+
+ trainer.train()
+ trainer.save_model()
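One caveat worth flagging: as written, eval_steps=400 does nothing, because TrainingArguments defaults evaluation_strategy to "no", so the held-out split is never scored during training. A sketch of the same arguments with periodic evaluation actually enabled; all values mirror the script above, only evaluation_strategy is added:

from transformers import TrainingArguments

training_args = TrainingArguments(
    "./distilgpt2_quotes.TRANS",
    overwrite_output_dir=True,
    num_train_epochs=5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    evaluation_strategy="steps",  # without this, eval_steps is silently ignored
    eval_steps=400,
    save_steps=800,
)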
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04d4f3bafd9dcf92232437df75a31b0e0c5cfb5f5611b31185862d98040a005a
+ size 333969117
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b96db598d5dcd97cb51471c770578b42a77fd3f5160b06bdebc03030e57bb8dd
+ size 3375
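The two .bin entries are Git LFS pointer files rather than the weights themselves: each records the spec version, a sha256 object id, and the blob's size in bytes (about 334 MB for the model weights, 3375 bytes for the pickled TrainingArguments). After an LFS pull, a download can be checked against the pointer's oid; a minimal sketch, assuming the real pytorch_model.bin sits in the working directory:

import hashlib

# Expected digest, copied from the oid line of the pytorch_model.bin pointer above
EXPECTED = "04d4f3bafd9dcf92232437df75a31b0e0c5cfb5f5611b31185862d98040a005a"

# Hash the blob in 1 MiB chunks to avoid loading ~334 MB into memory at once
sha = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("match" if sha.hexdigest() == EXPECTED else "mismatch")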