nreimers committed
Commit 79b3d6f
1 Parent(s): 2262244
README.md ADDED
@@ -0,0 +1,5 @@
+ # DistilBERT with word2vec token embeddings
+
+ This model has a word2vec token embedding matrix with 256k entries. The word2vec embeddings were trained for 3 epochs on 100 GB of text from C4, MS MARCO, News, Wikipedia, and S2ORC.
+
+ The model was then trained with MLM on this data for 250k steps (batch size 64). The token embeddings were NOT updated.
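A minimal usage sketch for the checkpoint described above, assuming the standard transformers masked-LM API; the repository id below is a placeholder assumption and should be replaced with the id of this repository.

from transformers import AutoTokenizer, AutoModelForMaskedLM, pipeline

model_id = "nreimers/distilbert-word2vec_256k_MLM_250k"  # placeholder repo id; adjust to this repository
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

# The frozen 256k-entry word2vec matrix acts as a regular token embedding at inference time.
fill_mask = pipeline("fill-mask", model=model, tokenizer=tokenizer)
print(fill_mask("Paris is the capital of [MASK]."))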
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "_name_or_path": "train-w2v-model/c4_msmarco_news_s2orc_wiki/distilbert-256k/",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForMaskedLM"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "initializer_range": 0.02,
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.17.0",
+   "vocab_size": 256000
+ }
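A quick sanity check on the config above: with vocab_size = 256000 and dim = 768, the token embedding matrix alone has roughly 196.6M parameters (~786 MB in float32), which is why the checkpoint below is close to 1 GB. A small sketch of the arithmetic:

vocab_size, dim = 256000, 768
emb_params = vocab_size * dim        # 196,608,000 embedding parameters
emb_bytes = emb_params * 4           # ~786 MB in float32, matching the word2vec .npy files further down
print(f"{emb_params:,} params, {emb_bytes / 1e6:.0f} MB")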
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da48b857b925b6aa9920aeb9abe7a482c9452d92e0a1856f72bd050cff46f63c
+ size 961553391
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"model_max_length": 512, "unk_token": "[UNK]", "cls_token": "[CLS]", "sep_token": "[SEP]", "pad_token": "[PAD]", "mask_token": "[MASK]", "model_input_names": ["input_ids", "attention_mask"], "special_tokens_map_file": "c4_msmarco_news_s2orc_wiki/tokenizer-256k/special_tokens_map.json", "name_or_path": "train-w2v-model/c4_msmarco_news_s2orc_wiki/distilbert-256k/", "tokenizer_class": "PreTrainedTokenizerFast"}
train_script.py ADDED
@@ -0,0 +1,398 @@
+
+ import argparse
+ import logging
+ import math
+ import os
+ from datetime import datetime
+ import datasets
+ import torch
+ from torch.utils.data import DataLoader
+ from tqdm.auto import tqdm
+ import sys
+ import transformers
+ from accelerate import Accelerator, DistributedType
+ from shutil import copyfile
+ import wandb
+ import numpy as np
+
+ from transformers import (
+     MODEL_MAPPING,
+     AutoModelForMaskedLM,
+     AutoTokenizer,
+     DataCollatorForLanguageModeling,
+     SchedulerType,
+     get_scheduler
+ )
+ from transformers.utils.versions import require_version
+
+
+
+ class TrainDataset(torch.utils.data.IterableDataset):
+     def __init__(self, filepath, tokenizer, max_length, batch_size, train_samples):
+         self.tokenizer = tokenizer
+         self.fIn = open(filepath)
+         self.max_length = max_length
+         self.batch_size = batch_size
+         self.train_samples = train_samples
+
+     def __iter__(self):
+         batch = []
+         for sent in self.fIn:
+             batch.append(sent.strip()[0:1000])
+
+             if len(batch) >= self.batch_size:
+                 # Tokenize a full batch at once (fast tokenizers parallelize across the batch)
+                 encoded = self.tokenizer(batch, add_special_tokens=True, truncation=True, max_length=self.max_length, return_special_tokens_mask=True, padding=True)
+                 #print(len(encoded['input_ids'][0]))
+                 for idx in range(len(batch)):
+                     single_sample = {key: encoded[key][idx] for key in encoded}
+                     yield single_sample
+
+                 batch = []
+
+     def __len__(self):
+         return self.train_samples
+
+
+
+
+
+ ## Dev dataset
+ class DevDataset(torch.utils.data.Dataset):
+     def __init__(self, filepath, tokenizer, max_length):
+         self.tokenizer = tokenizer
+         self.max_length = max_length
+         with open(filepath) as fIn:
+             sentences = [sent.strip() for sent in fIn]
+
+         self.num_sentences = len(sentences)
+         self.tokenized = self.tokenizer(sentences, add_special_tokens=True, truncation=True, max_length=self.max_length, return_special_tokens_mask=True)
+
+     def __getitem__(self, idx):
+         return {key: self.tokenized[key][idx] for key in self.tokenized}
+
+     def __len__(self):
+         return self.num_sentences
+
+
+
+ logger = logging.getLogger(__name__)
+ require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
+ MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
+ MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description="Finetune a transformers model on a Masked Language Modeling task")
+     parser.add_argument(
+         "--dataset_config_name",
+         type=str,
+         default=None,
+         help="The configuration name of the dataset to use (via the datasets library).",
+     )
+     parser.add_argument(
+         "--train_file", type=str, default=None, help="A text file with the training data (one text per line)."
+     )
+     parser.add_argument(
+         "--dev_file", type=str, default=None, help="A text file with the dev data (one text per line)."
+     )
+     parser.add_argument(
+         "--model_name",
+         default="nicoladecao/msmarco-word2vec256000-distilbert-base-uncased",
+         type=str,
+         help="Path to pretrained model or model identifier from huggingface.co/models."
+     )
+     parser.add_argument(
+         "--per_device_batch_size",
+         type=int,
+         default=16,
+         help="Batch size (per device) for the training dataloader.",
+     )
+     parser.add_argument(
+         "--learning_rate",
+         type=float,
+         default=5e-5,
+         help="Initial learning rate (after the potential warmup period) to use.",
+     )
+     parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay to use.")
+     parser.add_argument("--num_train_epochs", type=int, default=1, help="Total number of training epochs to perform.")
+     parser.add_argument(
+         "--max_train_steps",
+         type=int,
+         help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
+     )
+     parser.add_argument(
+         "--gradient_accumulation_steps",
+         type=int,
+         default=1,
+         help="Number of update steps to accumulate before performing a backward/update pass.",
+     )
+     parser.add_argument(
+         "--lr_scheduler_type",
+         type=SchedulerType,
+         default="linear",
+         help="The scheduler type to use.",
+         choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
+     )
+     parser.add_argument(
+         "--num_warmup_steps", type=int, default=1000, help="Number of steps for the warmup in the lr scheduler."
+     )
+     parser.add_argument(
+         "--model_type",
+         type=str,
+         default=None,
+         help="Model type to use if training from scratch.",
+         choices=MODEL_TYPES,
+     )
+     parser.add_argument(
+         "--max_seq_length",
+         type=int,
+         default=256,
+         help="The maximum total input sequence length after tokenization. Sequences longer than this will be truncated.",
+     )
+     parser.add_argument(
+         "--line_by_line",
+         type=bool,
+         default=True,
+         help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
+     )
+     parser.add_argument(
+         "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
+     )
+     parser.add_argument(
+         "--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
+     )
+     parser.add_argument("--mixed_precision", default="fp16")
+     parser.add_argument("--train_samples", required=True, type=int)
+     parser.add_argument("--eval_steps", default=10000, type=int)
+     parser.add_argument("--max_grad_norm", default=1.0, type=float)
+     parser.add_argument("--project", default="bert-word2vec")
+     parser.add_argument("--freeze_emb_layer", default=False, action='store_true')
+     parser.add_argument("--log_interval", default=1000, type=int)
+     parser.add_argument("--ckp_steps", default=50000, type=int)
+
+     args = parser.parse_args()
+
+
+     return args
+
+
+ def main():
+     args = parse_args()
+
+     # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
+     accelerator = Accelerator(mixed_precision=args.mixed_precision)
+     # Make one log on every process with the configuration for debugging.
+     logging.basicConfig(
+         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+         datefmt="%m/%d/%Y %H:%M:%S",
+         level=logging.INFO,
+     )
+     logger.info(accelerator.state)
+
+     # Setup logging, we only want one process per machine to log things on the screen.
+     # accelerator.is_local_main_process is only True for one process per machine.
+     logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+     if accelerator.is_local_main_process:
+         datasets.utils.logging.set_verbosity_warning()
+         transformers.utils.logging.set_verbosity_info()
+     else:
+         datasets.utils.logging.set_verbosity_error()
+         transformers.utils.logging.set_verbosity_error()
+
+
+     accelerator.wait_for_everyone()
+
+
+     # Load model and tokenizer
+     tokenizer = AutoTokenizer.from_pretrained(args.model_name)
+     model = AutoModelForMaskedLM.from_pretrained(args.model_name)
+
+     # Optionally freeze the (word2vec) token embedding layer
+     if args.freeze_emb_layer:
+         model.distilbert.embeddings.word_embeddings.requires_grad_(False)
+
+     # Logging & co. on the main process
+     if accelerator.is_main_process:
+         exp_name = f'{args.model_name.replace("/", "-")}-{"freeze_emb" if args.freeze_emb_layer else "update_emb"}-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
+         output_dir = os.path.join("output-mlm", exp_name)
+         wandb.init(project=args.project, name=exp_name, config=args)
+
+         os.makedirs(output_dir, exist_ok=False)
+
+         # Save tokenizer
+         tokenizer.save_pretrained(output_dir)
+
+         # Save train script
+         train_script_path = os.path.join(output_dir, 'train_script.py')
+         copyfile(__file__, train_script_path)
+         with open(train_script_path, 'a') as fOut:
+             fOut.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))
+
+
+     total_batch_size = args.per_device_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+
+     train_dataset = TrainDataset(args.train_file, tokenizer, args.max_seq_length, batch_size=total_batch_size, train_samples=args.train_samples)
+     eval_dataset = DevDataset(args.dev_file, tokenizer, args.max_seq_length)
+
+
+     # Data collator
+     # This one will take care of randomly masking the tokens.
+     data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=args.mlm_probability)
+
+     # DataLoaders creation:
+     train_dataloader = DataLoader(train_dataset, collate_fn=data_collator, batch_size=args.per_device_batch_size)
+     eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_batch_size)
+
+     # Optimizer
+     # Split weights in two groups, one with weight decay and the other not.
+     no_decay = ["bias", "LayerNorm.weight"]
+     optimizer_grouped_parameters = [
+         {
+             "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+             "weight_decay": args.weight_decay,
+         },
+         {
+             "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
+             "weight_decay": 0.0,
+         },
+     ]
+     optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
+
+     # Prepare everything with our `accelerator`.
+     model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(model, optimizer, train_dataloader, eval_dataloader)
+
+     # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
+     if accelerator.distributed_type == DistributedType.TPU:
+         model.tie_weights()
+
+     # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
+     # shorter in a multi-process setup)
+
+     # Scheduler and math around the number of training steps.
+     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+     if args.max_train_steps is None:
+         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
+     else:
+         args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+
+     lr_scheduler = get_scheduler(
+         name=args.lr_scheduler_type,
+         optimizer=optimizer,
+         num_warmup_steps=args.num_warmup_steps,
+         num_training_steps=args.max_train_steps,
+     )
+
+
+     # Train!
+     logger.info("***** Running training *****")
+     logger.info(f"  Num examples = {args.train_samples}")
+     logger.info(f"  Num Epochs = {args.num_train_epochs}")
+     logger.info(f"  Instantaneous batch size per device = {args.per_device_batch_size}")
+     logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
+     logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
+     logger.info(f"  Total optimization steps = {args.max_train_steps}")
+     # Only show the progress bar once on each machine.
+     progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process, smoothing=0.05)
+     completed_steps = 0
+     train_loss_values = []
+
+     best_eval_loss = 999999
+     if accelerator.is_main_process:
+         best_ckp_dir = os.path.join(output_dir, "best")
+         tokenizer.save_pretrained(best_ckp_dir)
+
+     for epoch in range(args.num_train_epochs):
+         logger.info(f"Start epoch {epoch}")
+         model.train()
+         for step, batch in enumerate(train_dataloader):
+             outputs = model(**batch)
+             loss = outputs.loss
+             loss = loss / args.gradient_accumulation_steps
+
+             if accelerator.is_main_process:
+                 train_loss_values.append(loss.cpu().item())
+
+             accelerator.backward(loss)
+             accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+             if step % args.gradient_accumulation_steps == 0:
+                 optimizer.step()
+                 lr_scheduler.step()
+                 optimizer.zero_grad()
+                 progress_bar.update(1)
+                 completed_steps += 1
+
+             ### Do logging
+             if accelerator.is_main_process:
+                 if completed_steps % args.log_interval == 0:
+                     wandb.log({"train/loss": np.mean(train_loss_values)}, step=completed_steps)
+                     train_loss_values = []
+
+
+             if completed_steps % args.eval_steps == 0:
+                 model.eval()
+                 losses = []
+                 for step, batch in enumerate(eval_dataloader):
+                     with torch.no_grad():
+                         outputs = model(**batch)
+
+                     loss = outputs.loss
+                     losses.append(accelerator.gather(loss.repeat(args.per_device_batch_size)))
+
+                 losses = torch.cat(losses)
+                 losses = losses[: len(eval_dataset)]
+                 try:
+                     eval_loss = torch.mean(losses)
+                 except OverflowError:
+                     eval_loss = float("inf")
+
+                 logger.info(f"step {completed_steps}: eval loss: {eval_loss}")
+                 if accelerator.is_main_process:
+                     wandb.log({"eval/loss": eval_loss}, step=completed_steps)
+
+                 model.train()
+
+                 # Save model
+                 accelerator.wait_for_everyone()
+                 if accelerator.is_main_process:
+                     unwrapped_model = accelerator.unwrap_model(model)
+                     unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
+                     with open(os.path.join(output_dir, "train_steps.log"), 'a') as fOut:
+                         fOut.write(f"{completed_steps}: {eval_loss}\n")
+
+                     # Save best model
+                     if eval_loss < best_eval_loss:
+                         best_eval_loss = eval_loss
+                         unwrapped_model.save_pretrained(best_ckp_dir, save_function=accelerator.save)
+                         with open(os.path.join(best_ckp_dir, "train_steps.log"), 'a') as fOut:
+                             fOut.write(f"{completed_steps}: {eval_loss}\n")
+
+             if accelerator.is_main_process and completed_steps % args.ckp_steps == 0:
+                 ckp_dir = os.path.join(output_dir, f"ckp-{int(completed_steps/1000)}k")
+                 unwrapped_model = accelerator.unwrap_model(model)
+                 unwrapped_model.save_pretrained(ckp_dir, save_function=accelerator.save)
+                 tokenizer.save_pretrained(ckp_dir)
+                 with open(os.path.join(ckp_dir, "train_steps.log"), 'a') as fOut:
+                     fOut.write(f"{completed_steps}: {eval_loss}\n")
+
+
+             if completed_steps >= args.max_train_steps:
+                 break
+
+     # Final save after training (this script has no --output_dir argument, so the upstream example's `args.output_dir` check was dropped; it always writes to output_dir)
+     accelerator.wait_for_everyone()
+     if accelerator.is_main_process:
+         unwrapped_model = accelerator.unwrap_model(model)
+         unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
+         with open(os.path.join(output_dir, "train_steps.log"), 'a') as fOut:
+             fOut.write(f"{completed_steps}\n")
+
+
+
+
+ if __name__ == "__main__":
+     main()
+
+
+ # Script was called via:
+ #python train_mlm-iterable.py --train_file data/c4_msmarco_news_s2orc_wiki_train.txt --dev_file data/c4_msmarco_news_s2orc_wiki_dev.txt --train_samples 100000000 --model_name train-w2v-model/c4_msmarco_news_s2orc_wiki/distilbert-256k/ --freeze_emb_layer
train_steps.log ADDED
@@ -0,0 +1,102 @@
+ 10000: 3.6185991764068604
+ 20000: 3.181567430496216
+ 30000: 3.019852638244629
+ 40000: 2.8929433822631836
+ 50000: 2.865853786468506
+ 60000: 2.8218629360198975
+ 70000: 2.7376461029052734
+ 80000: 2.7601311206817627
+ 90000: 2.698227882385254
+ 100000: 2.6650893688201904
+ 110000: 2.6815457344055176
+ 120000: 2.6339340209960938
+ 130000: 2.593796730041504
+ 140000: 2.6141812801361084
+ 150000: 2.6021640300750732
+ 160000: 2.570080280303955
+ 170000: 2.5702555179595947
+ 180000: 2.5539512634277344
+ 190000: 2.5419578552246094
+ 200000: 2.551203727722168
+ 210000: 2.4972760677337646
+ 220000: 2.5177388191223145
+ 230000: 2.5238850116729736
+ 240000: 2.5064241886138916
+ 250000: 2.5157675743103027
+ 260000: 2.4895386695861816
+ 270000: 2.481090545654297
+ 280000: 2.49038028717041
+ 290000: 2.4765520095825195
+ 300000: 2.463596820831299
+ 310000: 2.464102268218994
+ 320000: 2.4584429264068604
+ 330000: 2.4655401706695557
+ 340000: 2.4645512104034424
+ 350000: 2.450732469558716
+ 360000: 2.443289279937744
+ 370000: 2.4305179119110107
+ 380000: 2.4552500247955322
+ 390000: 2.4438211917877197
+ 400000: 2.4352035522460938
+ 410000: 2.4060347080230713
+ 420000: 2.4099512100219727
+ 430000: 2.4188332557678223
+ 440000: 2.4242491722106934
+ 450000: 2.410978317260742
+ 460000: 2.4330966472625732
+ 470000: 2.376832962036133
+ 480000: 2.399747133255005
+ 490000: 2.40126895904541
+ 500000: 2.4065797328948975
+ 510000: 2.3685810565948486
+ 520000: 2.3840038776397705
+ 530000: 2.3881959915161133
+ 540000: 2.4079036712646484
+ 550000: 2.3647472858428955
+ 560000: 2.3705577850341797
+ 570000: 2.3733468055725098
+ 580000: 2.3845152854919434
+ 590000: 2.378904342651367
+ 600000: 2.3556222915649414
+ 610000: 2.3582944869995117
+ 620000: 2.364562511444092
+ 630000: 2.358213424682617
+ 640000: 2.366999864578247
+ 650000: 2.3657848834991455
+ 660000: 2.360605001449585
+ 670000: 2.3360767364501953
+ 680000: 2.353294610977173
+ 690000: 2.327178955078125
+ 700000: 2.333509683609009
+ 710000: 2.367043972015381
+ 720000: 2.3473172187805176
+ 730000: 2.3191168308258057
+ 740000: 2.3143470287323
+ 750000: 2.328407049179077
+ 760000: 2.3401668071746826
+ 770000: 2.34131121635437
+ 780000: 2.3470940589904785
+ 790000: 2.358293294906616
+ 800000: 2.321796417236328
+ 810000: 2.344054698944092
+ 820000: 2.3168962001800537
+ 830000: 2.3057608604431152
+ 840000: 2.2876601219177246
+ 850000: 2.3336281776428223
+ 860000: 2.3033647537231445
+ 870000: 2.328824758529663
+ 880000: 2.3161988258361816
+ 890000: 2.3136658668518066
+ 900000: 2.316829204559326
+ 910000: 2.311969757080078
+ 920000: 2.303323745727539
+ 930000: 2.295837163925171
+ 940000: 2.2938590049743652
+ 950000: 2.3025436401367188
+ 960000: 2.3099873065948486
+ 970000: 2.2945451736450195
+ 980000: 2.253411293029785
+ 990000: 2.3047292232513428
+ 1000000: 2.27850341796875
+ 1010000: 2.2839760780334473
+ 1020000: 2.27585506439209
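Each entry above is "completed_steps: eval_loss", where eval_loss is the mean masked-LM cross-entropy on the dev file written by train_script.py. A small sketch for converting these values to perplexity (perplexity = exp(cross-entropy)); for example, the final loss of ~2.28 corresponds to a perplexity of roughly 9.7:

import math

with open("train_steps.log") as fIn:
    for line in fIn:
        step, loss = line.strip().split(": ")
        # exp of the masked-LM cross-entropy gives the per-token perplexity
        print(step, round(math.exp(float(loss)), 2))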
word2vec/word2vec.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:99da0c719746314682d2e76c863ffd249033cf648803bbe725913d0ee0fe73e2
+ size 6525189
word2vec/word2vec.model.syn1neg.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9426e8e3dd37097a6f1c134f96be7b81d41ae57339ef03882977851ae0a014b7
+ size 786432128
word2vec/word2vec.model.wv.vectors.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:77d0763007d8994c7ac384164d72f45cea836e503809e6ff2f621af64b9227dc
+ size 786432128
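The three word2vec/ files look like a gensim-native save: word2vec.model is the main pickle, while the .npy side files hold the input vectors (wv.vectors) and the negative-sampling output weights (syn1neg). Assuming that format, a loading sketch; the expected vector shape (256000, 768) is consistent with the ~786 MB float32 .npy files above:

from gensim.models import Word2Vec

# Assumes gensim's native save layout; the .npy side files are picked up automatically.
w2v = Word2Vec.load("word2vec/word2vec.model")
print(w2v.wv.vectors.shape)  # expected: (256000, 768)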