{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "# Pin this notebook to physical GPU 2; must run before torch/TF initialize CUDA.\n",
    "os.environ['CUDA_VISIBLE_DEVICES']='2'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2022-04-07 18:09:16.286080: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n",
      "2022-04-07 18:09:16.286112: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4.12.2\n"
     ]
    }
   ],
   "source": [
    "# Sanity-check the installed transformers version (notebook developed against 4.12.2).\n",
    "import transformers\n",
    "print(transformers.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "def dialogues_to_documents(df):\n",
    "    \"\"\"Join each dialogue's utterances (grouped by Dialogue_ID) into one text document.\"\"\"\n",
    "    return df.groupby('Dialogue_ID')['Utterance'].apply(lambda x: x.str.cat(sep='。')).reset_index()\n",
    "\n",
    "# CPED train split -> MLM training corpus; valid split -> held-out validation corpus.\n",
    "train = pd.read_csv('/148Dataset/Dataset/CPED/train_split.csv')\n",
    "train = dialogues_to_documents(train)\n",
    "test = pd.read_csv('/148Dataset/Dataset/CPED/valid_split.csv')\n",
    "test = dialogues_to_documents(test)\n",
    "\n",
    "# Create the output directory up front so the to_csv calls below cannot fail\n",
    "# with FileNotFoundError on a fresh checkout (nothing else creates 'corpus/').\n",
    "os.makedirs('corpus', exist_ok=True)\n",
    "\n",
    "mlm_data = train[['Utterance']].rename(columns={'Utterance': 'text'})\n",
    "mlm_data.to_csv('corpus/mlm_data.csv', index=False)\n",
    "\n",
    "mlm_data_val = test[['Utterance']].rename(columns={'Utterance': 'text'})\n",
    "mlm_data_val.to_csv('corpus/mlm_data_val.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "8086"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Number of dialogue-level documents in the MLM training corpus.\n",
    "len(mlm_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# All dependencies for the pretraining pipeline: stdlib, HF datasets/accelerate,\n",
    "# torch, and the transformers components used by main() below.\n",
    "import argparse\n",
    "import logging\n",
    "import math\n",
    "import os\n",
    "import random\n",
    "\n",
    "import datasets\n",
    "from datasets import load_dataset\n",
    "from tqdm.auto import tqdm\n",
    "from accelerate import Accelerator\n",
    "\n",
    "import torch\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "import transformers\n",
    "from transformers import (\n",
    "    CONFIG_MAPPING, \n",
    "    MODEL_MAPPING, \n",
    "    AdamW, \n",
    "    AutoConfig, \n",
    "    AutoModelForMaskedLM, \n",
    "    AutoTokenizer, \n",
    "    DataCollatorForLanguageModeling, \n",
    "    SchedulerType, \n",
    "    get_scheduler, \n",
    "    set_seed\n",
    ")\n",
    "\n",
    "# Module-level logger; its verbosity is configured inside main().\n",
    "logger = logging.getLogger(__name__)\n",
    "# Enumerate the model types transformers supports for MLM (used for --model_type style lookups).\n",
    "MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())\n",
    "MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n",
    "\n",
    "# from pprint import pprint\n",
    "# pprint(MODEL_TYPES, width=3, compact=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TrainConfig:\n",
    "    \"\"\"All hyper-parameters for MLM pretraining (stands in for argparse args).\"\"\"\n",
    "    train_file = 'corpus/mlm_data.csv'\n",
    "    # Fixed: this previously pointed at the *training* corpus ('corpus/mlm_data.csv'),\n",
    "    # so validation perplexity was measured on data the model had already seen.\n",
    "    # The held-out file written in the data-prep cell is used instead.\n",
    "    validation_file = 'corpus/mlm_data_val.csv'\n",
    "    validation_split_percentage = 5\n",
    "    pad_to_max_length = True\n",
    "    model_name_or_path = 'hfl/chinese-roberta-wwm-ext'\n",
    "    config_name = 'hfl/chinese-roberta-wwm-ext'\n",
    "    tokenizer_name = 'hfl/chinese-roberta-wwm-ext'\n",
    "    use_slow_tokenizer = True\n",
    "    per_device_train_batch_size = 4\n",
    "    per_device_eval_batch_size = 4\n",
    "    learning_rate = 5e-5\n",
    "    weight_decay = 1e-4\n",
    "    num_train_epochs = 100  # change to 5 for a quick run\n",
    "    max_train_steps = None  # derived from num_train_epochs when None\n",
    "    gradient_accumulation_steps = 2\n",
    "    lr_scheduler_type = 'constant_with_warmup'\n",
    "    num_warmup_steps = 200\n",
    "    output_dir = 'output/pretrain'\n",
    "    seed = 2021\n",
    "    model_type = 'roberta'\n",
    "    max_seq_length = 512\n",
    "    line_by_line = False\n",
    "    preprocessing_num_workers = 4\n",
    "    overwrite_cache = True\n",
    "    mlm_probability = 0.15  # fraction of tokens masked by the data collator\n",
    "\n",
    "config = TrainConfig()\n",
    "\n",
    "# Validate corpus file extensions and make sure the output directory exists.\n",
    "if config.train_file is not None:\n",
    "    extension = config.train_file.split(\".\")[-1]\n",
    "    assert extension in [\"csv\", \"json\", \"txt\"], \"`train_file` should be a csv, json or txt file.\"\n",
    "if config.validation_file is not None:\n",
    "    extension = config.validation_file.split(\".\")[-1]\n",
    "    assert extension in [\"csv\", \"json\", \"txt\"], \"`validation_file` should be a csv, json or txt file.\"\n",
    "if config.output_dir is not None:\n",
    "    os.makedirs(config.output_dir, exist_ok=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Masked-LM domain-adaptive pretraining (adapted from HF run_mlm_no_trainer.py).\n",
    "\n",
    "    Reads the corpora named in TrainConfig, tokenizes and chunks them to\n",
    "    max_seq_length, trains with the MLM objective under accelerate, logs\n",
    "    validation perplexity each epoch, and saves the final model weights.\n",
    "    \"\"\"\n",
    "    args = TrainConfig()\n",
    "    accelerator = Accelerator()\n",
    "    logging.basicConfig(\n",
    "        format=\"%(asctime)s - %(levelname)s - %(name)s -   %(message)s\",\n",
    "        datefmt=\"%m/%d/%Y %H:%M:%S\",\n",
    "        level=logging.INFO,\n",
    "    )\n",
    "    logger.info(accelerator.state)\n",
    "    # Only the local main process logs at INFO; other ranks log errors only.\n",
    "    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)\n",
    "\n",
    "    if accelerator.is_local_main_process:\n",
    "        datasets.utils.logging.set_verbosity_warning()\n",
    "        transformers.utils.logging.set_verbosity_info()\n",
    "    else:\n",
    "        datasets.utils.logging.set_verbosity_error()\n",
    "        transformers.utils.logging.set_verbosity_error()\n",
    "    if args.seed is not None:\n",
    "        set_seed(args.seed)\n",
    "\n",
    "    # data_files maps split name -> corpus file for the train and validation splits.\n",
    "    data_files = {}\n",
    "    if args.train_file is not None:\n",
    "        data_files[\"train\"] = args.train_file\n",
    "    if args.validation_file is not None:\n",
    "        data_files[\"validation\"] = args.validation_file\n",
    "    extension = args.train_file.split(\".\")[-1]\n",
    "    if extension == \"txt\":\n",
    "        extension = \"text\"\n",
    "    raw_datasets = load_dataset(extension, data_files=data_files)\n",
    "\n",
    "    if args.config_name:\n",
    "        config = AutoConfig.from_pretrained(args.config_name)\n",
    "    elif args.model_name_or_path:  # fixed: previously tested the notebook-global `config` object\n",
    "        config = AutoConfig.from_pretrained(args.model_name_or_path)\n",
    "    else:\n",
    "        config = CONFIG_MAPPING[args.model_type]()\n",
    "        logger.warning(\"You are instantiating a new config instance from scratch.\")\n",
    "\n",
    "    if args.tokenizer_name:\n",
    "        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)\n",
    "    elif args.model_name_or_path:\n",
    "        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)\n",
    "    else:\n",
    "        raise ValueError(\n",
    "            \"You are instantiating a new tokenizer from scratch. This is not supported by this script.\"\n",
    "            \"You can do it from another script, save it, and load it from here, using --tokenizer_name.\"\n",
    "        )\n",
    "\n",
    "    if args.model_name_or_path:\n",
    "        model = AutoModelForMaskedLM.from_pretrained(\n",
    "            args.model_name_or_path,\n",
    "            from_tf=bool(\".ckpt\" in args.model_name_or_path),\n",
    "            config=config,\n",
    "        )\n",
    "    else:\n",
    "        logger.info(\"Training new model from scratch\")\n",
    "        model = AutoModelForMaskedLM.from_config(config)\n",
    "\n",
    "    # Resize the embedding matrix in case the tokenizer vocab differs from the checkpoint.\n",
    "    model.resize_token_embeddings(len(tokenizer))\n",
    "\n",
    "    column_names = raw_datasets[\"train\"].column_names\n",
    "    text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n",
    "\n",
    "    if args.max_seq_length is None:\n",
    "        max_seq_length = tokenizer.model_max_length\n",
    "        if max_seq_length > 1024:\n",
    "            logger.warning(\n",
    "                f\"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). \"\n",
    "                \"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx.\"\n",
    "            )\n",
    "            max_seq_length = 1024\n",
    "    else:\n",
    "        if args.max_seq_length > tokenizer.model_max_length:\n",
    "            logger.warning(\n",
    "                f\"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the\"\n",
    "                f\"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.\"\n",
    "            )\n",
    "        max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)\n",
    "\n",
    "    def tokenize_function(examples):\n",
    "        # return_special_tokens_mask lets the MLM collator avoid masking special tokens.\n",
    "        return tokenizer(examples[text_column_name], return_special_tokens_mask=True)\n",
    "\n",
    "    tokenized_datasets = raw_datasets.map(\n",
    "        tokenize_function,\n",
    "        batched=True,\n",
    "        num_proc=args.preprocessing_num_workers,\n",
    "        remove_columns=column_names,\n",
    "        load_from_cache_file=not args.overwrite_cache,\n",
    "    )\n",
    "\n",
    "    def group_texts(examples):\n",
    "        # Concatenate all tokenized texts, then split into fixed chunks of\n",
    "        # max_seq_length; the remainder shorter than one chunk is dropped.\n",
    "        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\n",
    "        total_length = len(concatenated_examples[list(examples.keys())[0]])\n",
    "        total_length = (total_length // max_seq_length) * max_seq_length\n",
    "        result = {\n",
    "            k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]\n",
    "            for k, t in concatenated_examples.items()\n",
    "        }\n",
    "        return result\n",
    "\n",
    "    tokenized_datasets = tokenized_datasets.map(\n",
    "        group_texts,\n",
    "        batched=True,\n",
    "        num_proc=args.preprocessing_num_workers,\n",
    "        load_from_cache_file=not args.overwrite_cache,\n",
    "    )\n",
    "    train_dataset = tokenized_datasets[\"train\"]\n",
    "    eval_dataset = tokenized_datasets[\"validation\"]\n",
    "\n",
    "    # Random masking at mlm_probability is applied on the fly by the collator.\n",
    "    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=args.mlm_probability)\n",
    "    train_dataloader = DataLoader(\n",
    "        train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size\n",
    "    )\n",
    "    eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)\n",
    "\n",
    "    # No weight decay for biases and LayerNorm weights.\n",
    "    no_decay = [\"bias\", \"LayerNorm.weight\"]\n",
    "    optimizer_grouped_parameters = [\n",
    "        {\n",
    "            \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": args.weight_decay,\n",
    "        },\n",
    "        {\n",
    "            \"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n",
    "            \"weight_decay\": 0.0,\n",
    "        },\n",
    "    ]\n",
    "    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)\n",
    "\n",
    "    model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n",
    "        model, optimizer, train_dataloader, eval_dataloader\n",
    "    )\n",
    "\n",
    "    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)\n",
    "    if args.max_train_steps is None:\n",
    "        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch\n",
    "    else:\n",
    "        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)\n",
    "\n",
    "    lr_scheduler = get_scheduler(\n",
    "        name=args.lr_scheduler_type,\n",
    "        optimizer=optimizer,\n",
    "        num_warmup_steps=args.num_warmup_steps,\n",
    "        num_training_steps=args.max_train_steps,\n",
    "    )\n",
    "\n",
    "    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps\n",
    "\n",
    "    logger.info(\"***** Running training *****\")\n",
    "    logger.info(f\"  Num examples = {len(train_dataset)}\")\n",
    "    logger.info(f\"  Num Epochs = {args.num_train_epochs}\")\n",
    "    logger.info(f\"  Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n",
    "    logger.info(f\"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}\")\n",
    "    logger.info(f\"  Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n",
    "    logger.info(f\"  Total optimization steps = {args.max_train_steps}\")\n",
    "    # Only show the progress bar once on each machine.\n",
    "    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)\n",
    "    completed_steps = 0\n",
    "\n",
    "    for epoch in range(args.num_train_epochs):\n",
    "        model.train()\n",
    "        for step, batch in enumerate(train_dataloader):\n",
    "            outputs = model(**batch)\n",
    "            loss = outputs.loss\n",
    "            loss = loss / args.gradient_accumulation_steps\n",
    "            accelerator.backward(loss)\n",
    "            # Update once every gradient_accumulation_steps micro-batches and always\n",
    "            # flush on the last batch of the epoch (fixed off-by-one: the previous\n",
    "            # `step % n == 0` test stepped after the very first micro-batch, so the\n",
    "            # first update accumulated only one of the intended n gradients).\n",
    "            if (step + 1) % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:\n",
    "                optimizer.step()\n",
    "                lr_scheduler.step()\n",
    "                optimizer.zero_grad()\n",
    "                progress_bar.update(1)\n",
    "                completed_steps += 1\n",
    "\n",
    "            if completed_steps >= args.max_train_steps:\n",
    "                break\n",
    "\n",
    "        model.eval()\n",
    "        losses = []\n",
    "        for step, batch in enumerate(eval_dataloader):\n",
    "            with torch.no_grad():\n",
    "                outputs = model(**batch)\n",
    "\n",
    "            loss = outputs.loss\n",
    "            losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))\n",
    "\n",
    "        losses = torch.cat(losses)\n",
    "        losses = losses[: len(eval_dataset)]  # trim duplicates introduced by repeat/gather\n",
    "        # Guard against overflow when the mean loss is very large (diverged model).\n",
    "        try:\n",
    "            perplexity = math.exp(torch.mean(losses))\n",
    "        except OverflowError:\n",
    "            perplexity = float(\"inf\")\n",
    "\n",
    "        logger.info(f\"epoch {epoch}: perplexity: {perplexity}\")\n",
    "\n",
    "    if args.output_dir is not None:\n",
    "        accelerator.wait_for_everyone()\n",
    "        unwrapped_model = accelerator.unwrap_model(model)\n",
    "        unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/07/2022 18:09:21 - INFO - __main__ -   Distributed environment: NO\n",
      "Num processes: 1\n",
      "Process index: 0\n",
      "Local process index: 0\n",
      "Device: cuda\n",
      "Use FP16 precision: False\n",
      "\n",
      "04/07/2022 18:09:23 - WARNING - datasets.builder -   Using custom data configuration default-57414498967e8c6b\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading and preparing dataset csv/default (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /home/phd-fan.weiquan2/.cache/huggingface/datasets/csv/default-57414498967e8c6b/0.0.0/2dc6629a9ff6b5697d82c25b73731dd440507a69cbce8b425db50b751e8fcfd0...\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "54c64910dda446aeb47523b3e5942053",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=1.0, bar_style='info', layout=Layout(width='20px'), max=1.0), HTML(value=''…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/phd-fan.weiquan2/anaconda3/envs/torch17py38/lib/python3.8/site-packages/datasets/packaged_modules/csv/csv.py:92: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.\n",
      "\n",
      "\n",
      "  csv_file_reader = pd.read_csv(\n",
      "/home/phd-fan.weiquan2/anaconda3/envs/torch17py38/lib/python3.8/site-packages/datasets/packaged_modules/csv/csv.py:92: FutureWarning: The warn_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.\n",
      "\n",
      "\n",
      "  csv_file_reader = pd.read_csv(\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f4a2ae1909d443d094a729583e410c19",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=1.0, bar_style='info', layout=Layout(width='20px'), max=1.0), HTML(value=''…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/phd-fan.weiquan2/anaconda3/envs/torch17py38/lib/python3.8/site-packages/datasets/packaged_modules/csv/csv.py:92: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.\n",
      "\n",
      "\n",
      "  csv_file_reader = pd.read_csv(\n",
      "/home/phd-fan.weiquan2/anaconda3/envs/torch17py38/lib/python3.8/site-packages/datasets/packaged_modules/csv/csv.py:92: FutureWarning: The warn_bad_lines argument has been deprecated and will be removed in a future version. Use on_bad_lines in the future.\n",
      "\n",
      "\n",
      "  csv_file_reader = pd.read_csv(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Dataset csv downloaded and prepared to /home/phd-fan.weiquan2/.cache/huggingface/datasets/csv/default-57414498967e8c6b/0.0.0/2dc6629a9ff6b5697d82c25b73731dd440507a69cbce8b425db50b751e8fcfd0. Subsequent calls will reuse this data.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "loading configuration file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/config.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/b64aa51c20341fe5461d0663677dd1527cc8fb71c482d06ee75406857d0ed53a.ec9dcc5b0fb354ff7e07c612e71b31e4e41d5ffafe36e4ac9b37959db8873460\n",
      "Model config BertConfig {\n",
      "  \"architectures\": [\n",
      "    \"BertForMaskedLM\"\n",
      "  ],\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"bos_token_id\": 0,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"directionality\": \"bidi\",\n",
      "  \"eos_token_id\": 2,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"pooler_fc_size\": 768,\n",
      "  \"pooler_num_attention_heads\": 12,\n",
      "  \"pooler_num_fc_layers\": 3,\n",
      "  \"pooler_size_per_head\": 128,\n",
      "  \"pooler_type\": \"first_token_transform\",\n",
      "  \"position_embedding_type\": \"absolute\",\n",
      "  \"transformers_version\": \"4.12.2\",\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 21128\n",
      "}\n",
      "\n",
      "loading configuration file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/config.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/b64aa51c20341fe5461d0663677dd1527cc8fb71c482d06ee75406857d0ed53a.ec9dcc5b0fb354ff7e07c612e71b31e4e41d5ffafe36e4ac9b37959db8873460\n",
      "Model config BertConfig {\n",
      "  \"architectures\": [\n",
      "    \"BertForMaskedLM\"\n",
      "  ],\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"bos_token_id\": 0,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"directionality\": \"bidi\",\n",
      "  \"eos_token_id\": 2,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"pooler_fc_size\": 768,\n",
      "  \"pooler_num_attention_heads\": 12,\n",
      "  \"pooler_num_fc_layers\": 3,\n",
      "  \"pooler_size_per_head\": 128,\n",
      "  \"pooler_type\": \"first_token_transform\",\n",
      "  \"position_embedding_type\": \"absolute\",\n",
      "  \"transformers_version\": \"4.12.2\",\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 21128\n",
      "}\n",
      "\n",
      "loading file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/vocab.txt from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/92a56e79ec6564fd501527ed88ca336637eb4bfeb28d10580c3bbdfb7889a032.accd894ff58c6ff7bd4f3072890776c14f4ea34fcc08e79cd88c2d157756dceb\n",
      "loading file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/added_tokens.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/87c7eedd995b4bae2c34df3baf2cbd5df5496bed675126427849c72e590f5574.5cc6e825eb228a7a5cfd27cb4d7151e97a79fb962b31aaf1813aa102e746584b\n",
      "loading file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/special_tokens_map.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/d521373fc7ac35f63d56cf303de74a202403dcf1aaa792cd01f653694be59563.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d\n",
      "loading file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/tokenizer_config.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/5dedf24c46ec573f8a27ddfa6e737869ec8df462a9a7682b35ded34301c5bdc8.d23f50bbddc3fb34db5a76d47fa9bdd5d75bf4201ad2d49abbcca25629b3e562\n",
      "loading file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/tokenizer.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/e6278a884ec926a36ddf8d3cc3c598a65dd410c8c01be870468c4f2f71bee0d7.660ed5c7513bf13d4607410502a84e0de517eb889ff8c401068a1688868e1ccb\n",
      "loading configuration file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/config.json from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/b64aa51c20341fe5461d0663677dd1527cc8fb71c482d06ee75406857d0ed53a.ec9dcc5b0fb354ff7e07c612e71b31e4e41d5ffafe36e4ac9b37959db8873460\n",
      "Model config BertConfig {\n",
      "  \"architectures\": [\n",
      "    \"BertForMaskedLM\"\n",
      "  ],\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"bos_token_id\": 0,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"directionality\": \"bidi\",\n",
      "  \"eos_token_id\": 2,\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 12,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"pooler_fc_size\": 768,\n",
      "  \"pooler_num_attention_heads\": 12,\n",
      "  \"pooler_num_fc_layers\": 3,\n",
      "  \"pooler_size_per_head\": 128,\n",
      "  \"pooler_type\": \"first_token_transform\",\n",
      "  \"position_embedding_type\": \"absolute\",\n",
      "  \"transformers_version\": \"4.12.2\",\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 21128\n",
      "}\n",
      "\n",
      "loading weights file https://huggingface.co/hfl/chinese-roberta-wwm-ext/resolve/main/pytorch_model.bin from cache at /home/phd-fan.weiquan2/.cache/huggingface/transformers/ebc33cec9cd4890c20bd3b688fbf8e907167e0e2f209b801b3159123cd4630e4.d863eb12d1b0d00e5d41e9eb0d41914e4993c03e6de69e67bc10c79818f5fd4d\n",
      "Some weights of the model checkpoint at hfl/chinese-roberta-wwm-ext were not used when initializing BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']\n",
      "- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "All the weights of BertForMaskedLM were initialized from the model checkpoint at hfl/chinese-roberta-wwm-ext.\n",
      "If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForMaskedLM for predictions without further training.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/07/2022 18:09:54 - INFO - __main__ -   ***** Running training *****\n",
      "04/07/2022 18:09:54 - INFO - __main__ -     Num examples = 1707\n",
      "04/07/2022 18:09:54 - INFO - __main__ -     Num Epochs = 100\n",
      "04/07/2022 18:09:54 - INFO - __main__ -     Instantaneous batch size per device = 4\n",
      "04/07/2022 18:09:54 - INFO - __main__ -     Total train batch size (w. parallel, distributed & accumulation) = 8\n",
      "04/07/2022 18:09:54 - INFO - __main__ -     Gradient Accumulation steps = 2\n",
      "04/07/2022 18:09:54 - INFO - __main__ -     Total optimization steps = 21400\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "61ee1d3cbaa54afdb1c1a1df8cf50ba6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=21400.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "04/07/2022 18:11:36 - INFO - __main__ -   epoch 0: perplexity: 4.133160418614736\n",
      "04/07/2022 18:13:17 - INFO - __main__ -   epoch 1: perplexity: 3.87431797301479\n",
      "04/07/2022 18:15:01 - INFO - __main__ -   epoch 2: perplexity: 3.757365142474581\n",
      "04/07/2022 18:16:43 - INFO - __main__ -   epoch 3: perplexity: 3.6110182168419507\n",
      "04/07/2022 18:18:27 - INFO - __main__ -   epoch 4: perplexity: 3.4716503925662767\n",
      "04/07/2022 18:20:10 - INFO - __main__ -   epoch 5: perplexity: 3.396454410645235\n",
      "04/07/2022 18:21:53 - INFO - __main__ -   epoch 6: perplexity: 3.324705065585053\n",
      "04/07/2022 18:23:36 - INFO - __main__ -   epoch 7: perplexity: 3.216836361878876\n",
      "04/07/2022 18:25:20 - INFO - __main__ -   epoch 8: perplexity: 3.1869885804259575\n",
      "04/07/2022 18:27:04 - INFO - __main__ -   epoch 9: perplexity: 3.118267637263945\n",
      "04/07/2022 18:28:46 - INFO - __main__ -   epoch 10: perplexity: 3.06016623080488\n",
      "04/07/2022 18:30:29 - INFO - __main__ -   epoch 11: perplexity: 2.9760631158684876\n",
      "04/07/2022 18:32:12 - INFO - __main__ -   epoch 12: perplexity: 2.9228982878967833\n",
      "04/07/2022 18:33:58 - INFO - __main__ -   epoch 13: perplexity: 2.8955310341703284\n",
      "04/07/2022 18:35:41 - INFO - __main__ -   epoch 14: perplexity: 2.8030408785107412\n",
      "04/07/2022 18:37:24 - INFO - __main__ -   epoch 15: perplexity: 2.776276865512694\n",
      "04/07/2022 18:39:07 - INFO - __main__ -   epoch 16: perplexity: 2.7487252179279946\n",
      "04/07/2022 18:40:49 - INFO - __main__ -   epoch 17: perplexity: 2.72207287320224\n",
      "04/07/2022 18:42:32 - INFO - __main__ -   epoch 18: perplexity: 2.683951166803343\n",
      "04/07/2022 18:44:15 - INFO - __main__ -   epoch 19: perplexity: 2.639419957059947\n",
      "04/07/2022 18:45:57 - INFO - __main__ -   epoch 20: perplexity: 2.6266752038539285\n",
      "04/07/2022 18:47:40 - INFO - __main__ -   epoch 21: perplexity: 2.5957017019307056\n",
      "04/07/2022 18:49:23 - INFO - __main__ -   epoch 22: perplexity: 2.5360537375263714\n",
      "04/07/2022 18:51:06 - INFO - __main__ -   epoch 23: perplexity: 2.477637632496382\n",
      "04/07/2022 18:52:49 - INFO - __main__ -   epoch 24: perplexity: 2.4604096520622747\n",
      "04/07/2022 18:54:31 - INFO - __main__ -   epoch 25: perplexity: 2.417530818793114\n",
      "04/07/2022 18:56:14 - INFO - __main__ -   epoch 26: perplexity: 2.4017114862548583\n",
      "04/07/2022 18:57:56 - INFO - __main__ -   epoch 27: perplexity: 2.357463106052292\n",
      "04/07/2022 18:59:40 - INFO - __main__ -   epoch 28: perplexity: 2.3595889431476356\n",
      "04/07/2022 19:01:24 - INFO - __main__ -   epoch 29: perplexity: 2.375116104378282\n",
      "04/07/2022 19:03:06 - INFO - __main__ -   epoch 30: perplexity: 2.3401681046876686\n",
      "04/07/2022 19:04:50 - INFO - __main__ -   epoch 31: perplexity: 2.298905829251518\n",
      "04/07/2022 19:06:33 - INFO - __main__ -   epoch 32: perplexity: 2.2680401360550007\n",
      "04/07/2022 19:08:16 - INFO - __main__ -   epoch 33: perplexity: 2.257814960280037\n",
      "04/07/2022 19:10:01 - INFO - __main__ -   epoch 34: perplexity: 2.2272513710260826\n",
      "04/07/2022 19:11:44 - INFO - __main__ -   epoch 35: perplexity: 2.1752243222371335\n",
      "04/07/2022 19:13:27 - INFO - __main__ -   epoch 36: perplexity: 2.20672827307433\n",
      "04/07/2022 19:15:09 - INFO - __main__ -   epoch 37: perplexity: 2.1863400739018775\n",
      "04/07/2022 19:16:51 - INFO - __main__ -   epoch 38: perplexity: 2.118009090381817\n",
      "04/07/2022 19:18:34 - INFO - __main__ -   epoch 39: perplexity: 2.1013288354623274\n",
      "04/07/2022 19:20:17 - INFO - __main__ -   epoch 40: perplexity: 2.1128943477612454\n",
      "04/07/2022 19:21:59 - INFO - __main__ -   epoch 41: perplexity: 2.093426744121975\n",
      "04/07/2022 19:23:43 - INFO - __main__ -   epoch 42: perplexity: 2.0790195251777415\n",
      "04/07/2022 19:25:26 - INFO - __main__ -   epoch 43: perplexity: 2.045516368281551\n",
      "04/07/2022 19:27:09 - INFO - __main__ -   epoch 44: perplexity: 2.028619594164455\n",
      "04/07/2022 19:28:54 - INFO - __main__ -   epoch 45: perplexity: 2.028556235615139\n",
      "04/07/2022 19:30:37 - INFO - __main__ -   epoch 46: perplexity: 1.9874376513243759\n",
      "04/07/2022 19:32:22 - INFO - __main__ -   epoch 47: perplexity: 1.9886917144443426\n",
      "04/07/2022 19:34:06 - INFO - __main__ -   epoch 48: perplexity: 1.9791886431513095\n",
      "04/07/2022 19:35:48 - INFO - __main__ -   epoch 49: perplexity: 1.9650547785379981\n",
      "04/07/2022 19:37:32 - INFO - __main__ -   epoch 50: perplexity: 1.9646686507649302\n",
      "04/07/2022 19:39:16 - INFO - __main__ -   epoch 51: perplexity: 1.9353705837801212\n",
      "04/07/2022 19:40:59 - INFO - __main__ -   epoch 52: perplexity: 1.9155335903242892\n",
      "04/07/2022 19:42:42 - INFO - __main__ -   epoch 53: perplexity: 1.9246970642954597\n",
      "04/07/2022 19:44:25 - INFO - __main__ -   epoch 54: perplexity: 1.8891343533320917\n",
      "04/07/2022 19:46:09 - INFO - __main__ -   epoch 55: perplexity: 1.881074375718469\n",
      "04/07/2022 19:47:53 - INFO - __main__ -   epoch 56: perplexity: 1.8665592284201364\n",
      "04/07/2022 19:49:36 - INFO - __main__ -   epoch 57: perplexity: 1.8567123600926179\n",
      "04/07/2022 19:51:18 - INFO - __main__ -   epoch 58: perplexity: 1.8465157820840763\n",
      "04/07/2022 19:53:02 - INFO - __main__ -   epoch 59: perplexity: 1.8228392661568713\n",
      "04/07/2022 19:54:45 - INFO - __main__ -   epoch 60: perplexity: 1.8121758635432532\n",
      "04/07/2022 19:56:28 - INFO - __main__ -   epoch 61: perplexity: 1.8084070715374307\n",
      "04/07/2022 19:58:11 - INFO - __main__ -   epoch 62: perplexity: 1.8136631796090066\n",
      "04/07/2022 19:59:54 - INFO - __main__ -   epoch 63: perplexity: 1.7868507398543951\n",
      "04/07/2022 20:01:37 - INFO - __main__ -   epoch 64: perplexity: 1.7768559687341923\n",
      "04/07/2022 20:03:20 - INFO - __main__ -   epoch 65: perplexity: 1.7577059955194168\n",
      "04/07/2022 20:05:03 - INFO - __main__ -   epoch 66: perplexity: 1.7765297993585643\n",
      "04/07/2022 20:06:46 - INFO - __main__ -   epoch 67: perplexity: 1.7435425496466261\n",
      "04/07/2022 20:08:29 - INFO - __main__ -   epoch 68: perplexity: 1.7384190903081327\n",
      "04/07/2022 20:10:12 - INFO - __main__ -   epoch 69: perplexity: 1.7202225064262637\n",
      "04/07/2022 20:11:56 - INFO - __main__ -   epoch 70: perplexity: 1.7270597486877826\n",
      "04/07/2022 20:13:39 - INFO - __main__ -   epoch 71: perplexity: 1.708808602293696\n",
      "04/07/2022 20:15:22 - INFO - __main__ -   epoch 72: perplexity: 1.688876017991442\n",
      "04/07/2022 20:17:05 - INFO - __main__ -   epoch 73: perplexity: 1.7075096565613956\n",
      "04/07/2022 20:18:48 - INFO - __main__ -   epoch 74: perplexity: 1.6883730706141475\n",
      "04/07/2022 20:20:31 - INFO - __main__ -   epoch 75: perplexity: 1.66837204979101\n",
      "04/07/2022 20:22:14 - INFO - __main__ -   epoch 76: perplexity: 1.6696450042595303\n",
      "04/07/2022 20:23:57 - INFO - __main__ -   epoch 77: perplexity: 1.673990356464856\n",
      "04/07/2022 20:25:40 - INFO - __main__ -   epoch 78: perplexity: 1.6630043323986463\n",
      "04/07/2022 20:27:22 - INFO - __main__ -   epoch 79: perplexity: 1.642638901931109\n",
      "04/07/2022 20:29:05 - INFO - __main__ -   epoch 80: perplexity: 1.63727439088949\n",
      "04/07/2022 20:30:48 - INFO - __main__ -   epoch 81: perplexity: 1.620994075227607\n",
      "04/07/2022 20:32:31 - INFO - __main__ -   epoch 82: perplexity: 1.6389998462579185\n",
      "04/07/2022 20:34:13 - INFO - __main__ -   epoch 83: perplexity: 1.6229795016250117\n",
      "04/07/2022 20:35:56 - INFO - __main__ -   epoch 84: perplexity: 1.6257278541930364\n",
      "04/07/2022 20:37:38 - INFO - __main__ -   epoch 85: perplexity: 1.6103071903926172\n",
      "04/07/2022 20:39:22 - INFO - __main__ -   epoch 86: perplexity: 1.6083622391739356\n",
      "04/07/2022 20:41:06 - INFO - __main__ -   epoch 87: perplexity: 1.5810240211583895\n",
      "04/07/2022 20:42:49 - INFO - __main__ -   epoch 88: perplexity: 1.5985287369462895\n",
      "04/07/2022 20:44:32 - INFO - __main__ -   epoch 89: perplexity: 1.5910208488915392\n",
      "04/07/2022 20:46:14 - INFO - __main__ -   epoch 90: perplexity: 1.5709715975753555\n",
      "04/07/2022 20:47:57 - INFO - __main__ -   epoch 91: perplexity: 1.57427854705938\n",
      "04/07/2022 20:49:40 - INFO - __main__ -   epoch 92: perplexity: 1.5566599427036296\n",
      "04/07/2022 20:51:23 - INFO - __main__ -   epoch 93: perplexity: 1.569170956865131\n",
      "04/07/2022 20:53:06 - INFO - __main__ -   epoch 94: perplexity: 1.5528696460311708\n",
      "04/07/2022 20:54:49 - INFO - __main__ -   epoch 95: perplexity: 1.5661184956143361\n",
      "04/07/2022 20:56:33 - INFO - __main__ -   epoch 96: perplexity: 1.5454859288468648\n",
      "04/07/2022 20:58:15 - INFO - __main__ -   epoch 97: perplexity: 1.5341122459717433\n",
      "04/07/2022 20:59:58 - INFO - __main__ -   epoch 98: perplexity: 1.537017580354853\n",
      "04/07/2022 21:01:40 - INFO - __main__ -   epoch 99: perplexity: 1.5399791759066235\n",
      "Configuration saved in output/pretrain/config.json\n",
      "Model weights saved in output/pretrain/pytorch_model.bin\n"
     ]
    }
   ],
   "source": [
    "# Entry point: run main(), defined in an earlier cell (not shown here) —\n",
    "# presumably the MLM pretraining loop, given the per-epoch perplexity logs\n",
    "# and the config/weights saved to output/pretrain/ in this cell's output.\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "49e5bd26eb5fb0a8ffb649f62262c127e261ba51230dc2578599ab5938abf7ca"
  },
  "kernelspec": {
   "display_name": "Python 3.8.0 ('torch17py38')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.0"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
