{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# MobileBert 下游任务\n",
    "\n",
    "## 数据准备\n",
    "我们本次使用的数据集为**GLUE数据集**，GLUE包含九项NLU任务，语言均为英语。GLUE九项任务涉及到自然语言推断、文本蕴含、情感分析、语义相似等多个任务，本下游任务主要使用的是其中的SST-2。\n",
    "\n",
    "SST-2(The Stanford Sentiment Treebank，斯坦福情感树库)，单句子分类任务，包含电影评论中的句子和它们情感的人类注释。这项任务是给定句子的情感，类别分为两类正面情感（positive，样本标签对应为1）和负面情感（negative，样本标签对应为0），并且只用句子级别的标签。也就是，本任务也是一个二分类任务，针对句子级别，分为正面和负面情感。\n",
    "\n",
    "- 样本个数：训练集67,350个，开发集873个，测试集1,821个。\n",
    "- 任务：情感分类，正面情感和负面情感二分类。\n",
    "- 评价准则：accuracy。\n",
    "### 数据下载\n",
    "运行src/download_glue_data.py并指定输出文件夹即可下载数据集。"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 预训练模型\n",
    "将`config.json` 和 `vocab.txt` 以及预训练权重都放入同一文件夹"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练与推理代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\" Finetuning the library models for sequence classification .\"\"\"\n",
    "\n",
    "from __future__ import absolute_import, division, print_function\n",
    "import time\n",
    "import os\n",
    "import argparse\n",
    "import numpy as np\n",
    "import mindspore\n",
    "import mindspore.ops as ops\n",
    "from mindspore.dataset import text\n",
    "from mindspore import nn, Tensor, load_checkpoint, save_checkpoint\n",
    "from mindspore.dataset import RandomSampler,  DistributedSampler, NumpySlicesDataset, SequentialSampler\n",
    "from mindspore.communication.management import init, get_group_size\n",
    "\n",
    "from mindnlp.models.mobilebert.mobilebert import MobileBertForSequenceClassification\n",
    "from mindnlp.models.mobilebert.mobilebert_config import MobileBertConfig\n",
    "\n",
    "from mindnlp.transforms.tokenizers.bert_tokenizer import BertTokenizer\n",
    "\n",
    "from src.glue_compute_metrics import compute_metrics\n",
    "from src import glue_output_modes as output_modes\n",
    "from src import glue_processors as processors\n",
    "from src import glue_convert_examples_to_features as convert_examples_to_features"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 定义训练过程可视化工具"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ProgressBar(object):\n",
    "    '''\n",
    "    Custom console progress bar showing step count, an ASCII bar,\n",
    "    an ETA while running and the time per step once finished.\n",
    "    Example:\n",
    "        >>> pbar = ProgressBar(n_total=30,desc='training')\n",
    "        >>> step = 2\n",
    "        >>> pbar(step=step)\n",
    "    '''\n",
    "    def __init__(self, n_total, width=30, desc='Training'):\n",
    "        # n_total: total number of steps; width: bar width in characters.\n",
    "        self.width = width\n",
    "        self.n_total = n_total\n",
    "        self.start_time = time.time()\n",
    "        self.desc = desc\n",
    "\n",
    "    def __call__(self, step, info=None):\n",
    "        # BUGFIX: the original signature used a mutable default (info={});\n",
    "        # default argument objects are shared across calls in Python.\n",
    "        if info is None:\n",
    "            info = {}\n",
    "        now = time.time()\n",
    "        current = step + 1  # `step` is 0-based\n",
    "        recv_per = current / self.n_total\n",
    "        bar = f'[{self.desc}] {current}/{self.n_total} ['\n",
    "        if recv_per >= 1:\n",
    "            recv_per = 1\n",
    "        prog_width = int(self.width * recv_per)\n",
    "        if prog_width > 0:\n",
    "            bar += '=' * (prog_width - 1)\n",
    "            # '>' marks the moving head of the bar until the final step.\n",
    "            if current < self.n_total:\n",
    "                bar += \">\"\n",
    "            else:\n",
    "                bar += '='\n",
    "        bar += '.' * (self.width - prog_width)\n",
    "        bar += ']'\n",
    "        show_bar = f\"\\r{bar}\"  # carriage return rewrites the same console line\n",
    "        time_per_unit = (now - self.start_time) / current\n",
    "        if current < self.n_total:\n",
    "            # Still running: estimate remaining time (h:mm:ss / m:ss / s).\n",
    "            eta = time_per_unit * (self.n_total - current)\n",
    "            if eta > 3600:\n",
    "                eta_format = ('%d:%02d:%02d' %\n",
    "                              (eta // 3600, (eta % 3600) // 60, eta % 60))\n",
    "            elif eta > 60:\n",
    "                eta_format = '%d:%02d' % (eta // 60, eta % 60)\n",
    "            else:\n",
    "                eta_format = '%ds' % eta\n",
    "            time_info = f' - ETA: {eta_format}'\n",
    "        else:\n",
    "            # Finished: report the average time spent per step.\n",
    "            if time_per_unit >= 1:\n",
    "                time_info = f' {time_per_unit:.1f}s/step'\n",
    "            elif time_per_unit >= 1e-3:\n",
    "                time_info = f' {time_per_unit * 1e3:.1f}ms/step'\n",
    "            else:\n",
    "                time_info = f' {time_per_unit * 1e6:.1f}us/step'\n",
    "\n",
    "        show_bar += time_info\n",
    "        if len(info) != 0:\n",
    "            # Append extra metrics (e.g. the running loss) after the bar.\n",
    "            show_info = f'{show_bar} ' + \\\n",
    "                        \"-\".join([f' {key}: {value:.4f} ' for key, value in info.items()])\n",
    "            print(show_info, end='')\n",
    "        else:\n",
    "            print(show_bar, end='')"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 配置日志"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pathlib import Path\n",
    "import logging\n",
    "\n",
    "# Module-level root logger; (re)configured by init_logger below.\n",
    "logger = logging.getLogger()\n",
    "\n",
    "def init_logger(log_file=None, log_file_level=logging.NOTSET):\n",
    "    '''\n",
    "    Configure the root logger with a console handler and, optionally,\n",
    "    a file handler writing to `log_file` at level `log_file_level`.\n",
    "\n",
    "    Example:\n",
    "        >>> init_logger(log_file)\n",
    "        >>> logger.info(\"abc'\")\n",
    "    '''\n",
    "    if isinstance(log_file, Path):\n",
    "        log_file = str(log_file)\n",
    "\n",
    "    log_format = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',\n",
    "                                   datefmt='%m/%d/%Y %H:%M:%S')\n",
    "    # Use a distinct local name instead of shadowing the module-level `logger`\n",
    "    # (both refer to the same root logger object).\n",
    "    root_logger = logging.getLogger()\n",
    "    root_logger.setLevel(logging.INFO)\n",
    "    console_handler = logging.StreamHandler()\n",
    "    console_handler.setFormatter(log_format)\n",
    "    # Replace (not append) handlers so repeated calls do not duplicate output.\n",
    "    root_logger.handlers = [console_handler]\n",
    "    if log_file:  # BUGFIX: the extra `log_file != ''` was redundant after the truthiness check\n",
    "        file_handler = logging.FileHandler(log_file)\n",
    "        file_handler.setLevel(log_file_level)\n",
    "        file_handler.setFormatter(log_format)\n",
    "        root_logger.addHandler(file_handler)\n",
    "    return root_logger"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "MODEL_CLASSES = {\n",
    "    \"mobilebert\": (MobileBertConfig, MobileBertForSequenceClassification, BertTokenizer),\n",
    "}\n",
    "\n",
    "def train(args, train_dataset, model):\n",
    "    \"\"\" Train the model.\n",
    "\n",
    "    Returns (global_step, average loss per optimizer step, last batch inputs).\n",
    "    \"\"\"\n",
    "    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n",
    "\n",
    "    train_sampler = RandomSampler()\n",
    "    train_dataloader = NumpySlicesDataset(train_dataset, sampler=train_sampler)\n",
    "    train_dataloader = train_dataloader.batch(args.train_batch_size)\n",
    "    if args.max_steps > 0:\n",
    "        # A positive max_steps overrides num_train_epochs.\n",
    "        num_training_steps = args.max_steps\n",
    "        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n",
    "    else:\n",
    "        num_training_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n",
    "    args.warmup_steps = int(num_training_steps * args.warmup_proportion)\n",
    "    # Prepare optimizer (linear warmup and decay); no weight decay on bias/LayerNorm weights.\n",
    "    optimizer_grouped_parameters = [\n",
    "        {'params': list(filter(lambda x: 'bias' not in x.name and 'LayerNorm.weight' not in x.name, model.trainable_params())),\n",
    "         'weight_decay': args.weight_decay},\n",
    "        {'params': list(filter(lambda x: 'bias' in x.name or 'LayerNorm.weight' in x.name, model.trainable_params())), 'weight_decay': 0.0}\n",
    "    ]\n",
    "    optimizer = nn.AdamWeightDecay(optimizer_grouped_parameters, learning_rate=args.learning_rate, eps=args.adam_epsilon)\n",
    "    if args.fp16:\n",
    "        try:\n",
    "            from apex import amp\n",
    "        except ImportError:\n",
    "            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n",
    "        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n",
    "\n",
    "    # Train!\n",
    "    logger.info(\"***** Running training *****\")\n",
    "    logger.info(\"  Num examples = %d\", len(train_dataset[0]))\n",
    "    logger.info(\"  Num Epochs = %d\", args.num_train_epochs)\n",
    "    logger.info(\"  Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n",
    "    logger.info(\"  Total train batch size (w. parallel, distributed & accumulation) = %d\",\n",
    "                args.train_batch_size * args.gradient_accumulation_steps)\n",
    "    logger.info(\"  Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n",
    "    logger.info(\"  Total optimization steps = %d\", num_training_steps)\n",
    "\n",
    "    global_step = 0\n",
    "    tr_loss = 0.0\n",
    "    loss_fn = nn.CrossEntropyLoss()\n",
    "    for _ in range(int(args.num_train_epochs)):\n",
    "        pbar = ProgressBar(n_total=len(train_dataloader), desc='Training')\n",
    "        def forward_fn(data, label):\n",
    "            # The model returns a tuple; index 1 holds the classification logits.\n",
    "            logits = model(**data)[1]\n",
    "            loss = loss_fn(logits.view(-1, 2), label.view(-1))\n",
    "            return loss, logits\n",
    "\n",
    "        # Get gradient function\n",
    "        grad_fn = mindspore.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)\n",
    "\n",
    "        # Define function of one-step training: clip gradients globally, then update.\n",
    "        def train_step(data, label):\n",
    "            (loss, _), grads = grad_fn(data, label)\n",
    "            loss = ops.depend(loss, optimizer(ops.clip_by_global_norm(grads, args.max_grad_norm)))\n",
    "            return loss\n",
    "        model.set_train()\n",
    "        for batch, (all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels) in enumerate(train_dataloader):\n",
    "            inputs = {'input_ids': all_input_ids,\n",
    "                      'attention_mask': all_attention_mask,\n",
    "                      'labels': Tensor(all_labels, mindspore.int32)}\n",
    "            inputs['token_type_ids'] = all_token_type_ids\n",
    "            loss = train_step(inputs, Tensor(all_labels, mindspore.int32))\n",
    "            if args.gradient_accumulation_steps > 1:\n",
    "                # NOTE(review): this scales the *reported* loss only; the gradients\n",
    "                # applied inside train_step above are not scaled -- confirm intended.\n",
    "                loss = loss / args.gradient_accumulation_steps\n",
    "            tr_loss += loss\n",
    "            if (batch + 1) % args.gradient_accumulation_steps == 0:\n",
    "                global_step += 1\n",
    "            # BUGFIX: require global_step > 0; otherwise 0 % save_steps == 0 wrote\n",
    "            # a checkpoint on every batch before the first optimizer step.\n",
    "            if args.save_steps > 0 and global_step > 0 and global_step % args.save_steps == 0:\n",
    "                # Save model checkpoint\n",
    "                # NOTE(review): directory name uses global_step//10 -- verify that\n",
    "                # distinct save points cannot collide and overwrite each other.\n",
    "                output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step//10))\n",
    "                if not os.path.exists(output_dir):\n",
    "                    os.makedirs(output_dir)\n",
    "                save_checkpoint(model, os.path.join(output_dir, 'mobilebert-uncased.ckpt'))\n",
    "                logger.info(\"Saving model checkpoint to %s\", output_dir)\n",
    "            pbar(batch, {'loss': loss.asnumpy()})\n",
    "        print(\" \")\n",
    "    # BUGFIX: max(..., 1) guards against ZeroDivisionError when no optimizer step ran.\n",
    "    return global_step, tr_loss / max(global_step, 1), inputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate(args, model, tokenizer, prefix=\"\"):\n",
    "    \"\"\"Evaluate the model on the dev split(s) and return a dict of metrics.\"\"\"\n",
    "    # Loop to handle MNLI double evaluation (matched, mis-matched)\n",
    "    eval_task_names = (\"mnli\", \"mnli-mm\") if args.task_name == \"mnli\" else (args.task_name,)\n",
    "    eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == \"mnli\" else (args.output_dir,)\n",
    "\n",
    "    results = {}\n",
    "    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):\n",
    "        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, data_type='dev')\n",
    "        if not os.path.exists(eval_output_dir):\n",
    "            os.makedirs(eval_output_dir)\n",
    "\n",
    "        args.eval_batch_size = args.per_gpu_eval_batch_size * args.n_gpu\n",
    "        # Sequential sampling: evaluation order should be deterministic.\n",
    "        eval_sampler = SequentialSampler()\n",
    "        eval_dataloader = NumpySlicesDataset(eval_dataset, sampler=eval_sampler)\n",
    "        eval_dataloader = eval_dataloader.batch(args.eval_batch_size)\n",
    "\n",
    "        # Eval!\n",
    "        logger.info(\"***** Running evaluation {} *****\".format(prefix))\n",
    "        logger.info(\"  Num examples = %d\", len(eval_dataset))\n",
    "        logger.info(\"  Batch size = %d\", args.eval_batch_size)\n",
    "        eval_loss = 0.0\n",
    "        nb_eval_steps = 0\n",
    "        preds = None\n",
    "        out_label_ids = None\n",
    "        pbar = ProgressBar(n_total=len(eval_dataloader), desc=\"Evaluating\")\n",
    "        model.set_train(False)\n",
    "        model.set_grad(False)\n",
    "        for batch, (all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels) in enumerate(eval_dataloader):\n",
    "            inputs = {'input_ids': all_input_ids,\n",
    "                      'attention_mask': all_attention_mask,\n",
    "                      'labels': Tensor(all_labels, mindspore.int32)}\n",
    "            inputs['token_type_ids'] = all_token_type_ids\n",
    "            outputs = model(**inputs)\n",
    "            tmp_eval_loss, logits = outputs[:2]\n",
    "            eval_loss += tmp_eval_loss\n",
    "            nb_eval_steps += 1\n",
    "            if preds is None:\n",
    "                # BUGFIX: convert to numpy immediately so the np.append branch\n",
    "                # below always concatenates ndarrays (the original mixed a\n",
    "                # MindSpore Tensor with numpy arrays on later batches).\n",
    "                preds = logits.asnumpy()\n",
    "                out_label_ids = inputs['labels'].asnumpy()\n",
    "            else:\n",
    "                preds = np.append(preds, logits.asnumpy(), axis=0)\n",
    "                out_label_ids = np.append(out_label_ids, inputs['labels'].asnumpy(), axis=0)\n",
    "            pbar(batch)\n",
    "        print(' ')\n",
    "        eval_loss = eval_loss / nb_eval_steps\n",
    "        if args.output_mode == \"classification\":\n",
    "            preds = np.argmax(preds, axis=1)\n",
    "        elif args.output_mode == \"regression\":\n",
    "            preds = np.squeeze(preds)\n",
    "        result = compute_metrics(eval_task, preds, out_label_ids)\n",
    "        results.update(result)\n",
    "        logger.info(\"***** Eval results {} *****\".format(prefix))\n",
    "        for key in sorted(result.keys()):\n",
    "            logger.info(\"  %s = %s\", key, str(result[key]))\n",
    "    return results"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 加载数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_and_cache_examples(args, task, tokenizer, data_type='train'):\n",
    "    \"\"\"Build parallel feature lists (input_ids, attention_mask,\n",
    "    token_type_ids, lens, labels) for the given GLUE task and split.\n",
    "    \"\"\"\n",
    "    processor = processors[task]()\n",
    "    output_mode = output_modes[task]\n",
    "    # Load data features from dataset file\n",
    "    logger.info(\"Creating features from dataset file at %s\", args.data_dir)\n",
    "    label_list = processor.get_labels()\n",
    "    if task in ['mnli', 'mnli-mm'] and 'roberta' in args.model_type:\n",
    "        # HACK(label indices are swapped in RoBERTa pretrained model)\n",
    "        label_list[1], label_list[2] = label_list[2], label_list[1]\n",
    "\n",
    "    if data_type == 'train':\n",
    "        examples = processor.get_train_examples(args.data_dir)\n",
    "    elif data_type == 'dev':\n",
    "        examples = processor.get_dev_examples(args.data_dir)\n",
    "    else:\n",
    "        examples = processor.get_test_examples(args.data_dir)\n",
    "\n",
    "    features = convert_examples_to_features(examples,\n",
    "                                            tokenizer,\n",
    "                                            label_list=label_list,\n",
    "                                            max_seq_length=args.max_seq_length,\n",
    "                                            output_mode=output_mode)\n",
    "\n",
    "    # Convert features into the parallel python lists used to build the dataset.\n",
    "    all_input_ids = [f.input_ids for f in features]\n",
    "    all_attention_mask = [f.attention_mask for f in features]\n",
    "    all_token_type_ids = [f.token_type_ids for f in features]\n",
    "    all_lens = [f.input_len for f in features]\n",
    "    # BUGFIX: the classification/regression branches were identical, and any\n",
    "    # other output_mode left all_labels undefined (NameError); extract the\n",
    "    # labels unconditionally instead.\n",
    "    all_labels = [f.label for f in features]\n",
    "    dataset = (all_input_ids, all_attention_mask, all_token_type_ids, all_lens, all_labels)\n",
    "    return dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Parse arguments, build the model/tokenizer, then run training and/or evaluation.\"\"\"\n",
    "    parser = argparse.ArgumentParser()\n",
    "\n",
    "    ## Required parameters\n",
    "    parser.add_argument(\"--data_dir\", default=None, type=str, required=True,\n",
    "                        help=\"The input data dir. Should contain the .tsv files (or other data files) for the task.\")\n",
    "    parser.add_argument(\"--model_type\", default=None, type=str, required=True,\n",
    "                        help=\"Model type selected in the list: \")\n",
    "    parser.add_argument(\"--model_name_or_path\", default=None, type=str, required=True,\n",
    "                        help=\"Path to pre-trained model or shortcut name selected in the list\")\n",
    "    parser.add_argument(\"--task_name\", default=None, type=str, required=True,\n",
    "                        help=\"The name of the task to train selected in the list: \")\n",
    "    parser.add_argument(\"--output_dir\", default=None, type=str, required=True,\n",
    "                        help=\"The output directory where the model predictions and checkpoints will be written.\")\n",
    "\n",
    "    ## Other parameters\n",
    "    parser.add_argument(\"--config_name\", default=\"\", type=str,\n",
    "                        help=\"Pretrained config name or path if not the same as model_name\")\n",
    "    parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n",
    "                        help=\"Pretrained tokenizer name or path if not the same as model_name\")\n",
    "    parser.add_argument(\"--cache_dir\", default=\"\", type=str,\n",
    "                        help=\"Where do you want to store the pre-trained models downloaded from s3\")\n",
    "    parser.add_argument(\"--max_seq_length\", default=512, type=int,\n",
    "                        help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n",
    "                             \"than this will be truncated, sequences shorter will be padded.\")\n",
    "    parser.add_argument(\"--do_train\", action='store_true',\n",
    "                        help=\"Whether to run training.\")\n",
    "    parser.add_argument(\"--do_eval\", action='store_true',\n",
    "                        help=\"Whether to run eval on the dev set.\")\n",
    "    parser.add_argument(\"--do_predict\", action='store_true',\n",
    "                        help=\"Whether to run the model in inference mode on the test set.\")\n",
    "    parser.add_argument(\"--do_lower_case\", action='store_true',\n",
    "                        help=\"Set this flag if you are using an uncased model.\")\n",
    "\n",
    "    parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int,\n",
    "                        help=\"Batch size per GPU/CPU for training.\")\n",
    "    parser.add_argument(\"--per_gpu_eval_batch_size\", default=8, type=int,\n",
    "                        help=\"Batch size per GPU/CPU for evaluation.\")\n",
    "    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n",
    "                        help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n",
    "    parser.add_argument(\"--learning_rate\", default=5e-5, type=float,\n",
    "                        help=\"The initial learning rate for Adam.\")\n",
    "    parser.add_argument(\"--weight_decay\", default=0.01, type=float,\n",
    "                        help=\"Weight deay if we apply some.\")\n",
    "    parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n",
    "                        help=\"Epsilon for Adam optimizer.\")\n",
    "    parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n",
    "                        help=\"Max gradient norm.\")\n",
    "    parser.add_argument(\"--num_train_epochs\", default=3.0, type=float,\n",
    "                        help=\"Total number of training epochs to perform.\")\n",
    "    parser.add_argument(\"--max_steps\", default=-1, type=int,\n",
    "                        help=\"If > 0: set total number of training steps to perform. Override num_train_epochs.\")\n",
    "    parser.add_argument(\"--warmup_proportion\", default=0.1, type=float,\n",
    "                        help=\"Proportion of training to perform linear learning rate warmup for,E.g., 0.1 = 10% of training.\")\n",
    "\n",
    "    parser.add_argument('--logging_steps', type=int, default=10,\n",
    "                        help=\"Log every X updates steps.\")\n",
    "    parser.add_argument('--save_steps', type=int, default=1000,\n",
    "                        help=\"Save checkpoint every X updates steps.\")\n",
    "    parser.add_argument(\"--eval_all_checkpoints\", action='store_true',\n",
    "                        help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\")\n",
    "    parser.add_argument(\"--no_cuda\", action='store_true',\n",
    "                        help=\"Avoid using CUDA when available\")\n",
    "    parser.add_argument('--overwrite_output_dir', action='store_true',\n",
    "                        help=\"Overwrite the content of the output directory\")\n",
    "    parser.add_argument('--overwrite_cache', action='store_true',\n",
    "                        help=\"Overwrite the cached training and evaluation sets\")\n",
    "    parser.add_argument('--seed', type=int, default=42,\n",
    "                        help=\"random seed for initialization\")\n",
    "\n",
    "    parser.add_argument('--fp16', action='store_true',\n",
    "                        help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\")\n",
    "    parser.add_argument('--fp16_opt_level', type=str, default='O1',\n",
    "                        help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n",
    "                             \"See details at https://nvidia.github.io/apex/amp.html\")\n",
    "    parser.add_argument(\"--local_rank\", type=int, default=-1,\n",
    "                        help=\"For distributed training: local_rank\")\n",
    "    parser.add_argument('--server_ip', type=str, default='', help=\"For distant debugging.\")\n",
    "    parser.add_argument('--server_port', type=str, default='', help=\"For distant debugging.\")\n",
    "    # NOTE(review): the absolute paths below are machine-specific; adjust them\n",
    "    # to your own environment before running this notebook.\n",
    "    args = parser.parse_args([\"--model_type\",\"mobilebert\",\n",
    "                             \"--model_name_or_path\",\"/home/daiyuxin/lzh/MobileBert_MindSpore/prev_trained_model/mobilebert/\",\n",
    "                             \"--task_name\",\"SST-2\",\n",
    "                             \"--do_train\",\n",
    "                             \"--do_eval\",\n",
    "                             \"--do_lower_case\",\n",
    "                             \"--data_dir\",\"/home/daiyuxin/lzh/MobileBert_MindSpore/dataset/glue_data/SST-2/\",\n",
    "                             \"--max_seq_length\",\"128\",\n",
    "                             \"--per_gpu_train_batch_size\",\"32\",\n",
    "                             \"--per_gpu_eval_batch_size\",\"32\",\n",
    "                             \"--learning_rate\",\"5e-5\",\n",
    "                             \"--num_train_epochs\",\"5.0\",\n",
    "                             \"--max_grad_norm\",\"1.0\",\n",
    "                             \"--logging_steps\",\"2105\",\n",
    "                             \"--save_steps\",\"2105\",\n",
    "                             \"--output_dir\",\"/home/daiyuxin/lzh/MobileBert_MindSpore/outputs/SST-2_output/\",\n",
    "                             \"--overwrite_output_dir\",\n",
    "                             \"--seed\",\"42\"])\n",
    "    if not os.path.exists(args.output_dir):\n",
    "        os.mkdir(args.output_dir)\n",
    "    # Per-model subdirectory (output_dir is expected to end with '/').\n",
    "    args.output_dir = args.output_dir + '{}'.format(args.model_type)\n",
    "    if not os.path.exists(args.output_dir):\n",
    "        os.mkdir(args.output_dir)\n",
    "    time_ = time.strftime(\"%Y-%m-%d-%H_%M_%S\", time.localtime())\n",
    "    init_logger(log_file=args.output_dir + f'/{args.model_type}-{args.task_name}-{time_}.log')\n",
    "    if os.path.exists(args.output_dir) and os.listdir(\n",
    "            args.output_dir) and args.do_train and not args.overwrite_output_dir:\n",
    "        raise ValueError(\n",
    "            \"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.\".format(\n",
    "                args.output_dir))\n",
    "\n",
    "    # Setup distant debugging if needed\n",
    "    if args.server_ip and args.server_port:\n",
    "        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n",
    "        import ptvsd\n",
    "        print(\"Waiting for debugger attach\")\n",
    "        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n",
    "        ptvsd.wait_for_attach()\n",
    "\n",
    "    args.n_gpu = 1\n",
    "    # Prepare GLUE task\n",
    "    args.task_name = args.task_name.lower()\n",
    "    if args.task_name not in processors:\n",
    "        raise ValueError(\"Task not found: %s\" % (args.task_name))\n",
    "    processor = processors[args.task_name]()\n",
    "    args.output_mode = output_modes[args.task_name]\n",
    "    label_list = processor.get_labels()\n",
    "    num_labels = len(label_list)\n",
    "\n",
    "    # Load pretrained model and tokenizer\n",
    "    args.model_type = args.model_type.lower()\n",
    "    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n",
    "    # BUGFIX: removed leftover debugging (urlparse print of a hardcoded\n",
    "    # Windows path) that polluted the output and served no purpose.\n",
    "    config = config_class.from_pretrained(\n",
    "        args.config_name if args.config_name else args.model_name_or_path,\n",
    "        num_labels=num_labels,\n",
    "        finetuning_task=args.task_name,\n",
    "        cache_dir=args.cache_dir if args.cache_dir else None,\n",
    "    )\n",
    "    vocab = text.Vocab.from_file(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path+\"/vocab.txt\")\n",
    "    tokenizer = tokenizer_class(\n",
    "        vocab,\n",
    "        lower_case=args.do_lower_case,\n",
    "    )\n",
    "    model = model_class.from_pretrained(\n",
    "        args.model_name_or_path,\n",
    "        from_tf=bool(\".ckpt\" in args.model_name_or_path),\n",
    "        config=config,\n",
    "        cache_dir=args.cache_dir if args.cache_dir else None,\n",
    "    )\n",
    "    logger.info(\"Training/evaluation parameters %s\", args)\n",
    "\n",
    "    # Training\n",
    "    if args.do_train:\n",
    "        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train')\n",
    "        global_step, tr_loss, inputs = train(args, train_dataset, model)\n",
    "        logger.info(\" global_step = %s, average loss = %s\", global_step, tr_loss)\n",
    "\n",
    "    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()\n",
    "    if args.do_train:\n",
    "        # Create output directory if needed\n",
    "        if not os.path.exists(args.output_dir):\n",
    "            os.makedirs(args.output_dir)\n",
    "\n",
    "        logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n",
    "        save_checkpoint(model, os.path.join(args.output_dir, 'mobilebert-uncased.ckpt'))\n",
    "\n",
    "    if args.do_eval:\n",
    "        print(evaluate(args, model, tokenizer))\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
