{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # Reason: the following error was raised when saving the model:\n",
    "# ValueError: The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. Fix these issues to save the configuration.\n",
    "\n",
    "# Thrown during validation:\n",
    "# [UserWarning('`num_beams` is set to 1. However, `early_stopping` is set to `True` -- this flag is only used in beam-based generation modes. You should set `num_beams>1` or unset `early_stopping`.')]\n",
    "# # Fix applied:\n",
    "# In the model's config.json, \"early_stopping\": true was changed to false"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\u001b[32m2024-06-25 15:13:35.126\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36m__main__\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m52\u001b[0m - \u001b[1mDataset({\n",
      "    features: ['text'],\n",
      "    num_rows: 10001\n",
      "})\u001b[0m\n",
      "\u001b[32m2024-06-25 15:13:35.127\u001b[0m | \u001b[1mINFO    \u001b[0m | \u001b[36m__main__\u001b[0m:\u001b[36m<module>\u001b[0m:\u001b[36m53\u001b[0m - \u001b[1mDataset({\n",
      "    features: ['id', 'original_text', 'wrong_ids', 'correct_text'],\n",
      "    num_rows: 1184\n",
      "})\u001b[0m\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9f1a9397fd444ae2ad1a27b2fe162656",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/10001 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "'中国的首都是北京'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import json\n",
    "from dataclasses import dataclass, field\n",
    "from typing import Optional\n",
    "import os\n",
    "import argparse\n",
    "from transformers import AutoTokenizer, BartForConditionalGeneration, Text2TextGenerationPipeline, BertTokenizer\n",
    "from transformers import HfArgumentParser, TrainingArguments, Trainer, set_seed\n",
    "from datasets import load_dataset, Dataset\n",
    "from loguru import logger\n",
    "\n",
    "# Model selection -- BertTokenizer is used instead of AutoTokenizer,\n",
    "# presumably because the checkpoint ships a BERT-style vocab (confirm).\n",
    "# tokenizer = AutoTokenizer.from_pretrained(\"./models/bart-base-chinese-cluecorpussmall/\")\n",
    "tokenizer = BertTokenizer.from_pretrained(\"./models/bart-base-chinese-cluecorpussmall/\")\n",
    "model = BartForConditionalGeneration.from_pretrained(\"./models/bart-base-chinese-cluecorpussmall/\")\n",
    "# tokenizer = AutoTokenizer.from_pretrained(\"./models/Randeng-BART-139M/\",  use_fast=False)  # NOTE: was lowercase `false` (invalid Python)\n",
    "# model = BartForConditionalGeneration.from_pretrained(\"./models/Randeng-BART-139M/\")\n",
    "\n",
    "class CscDataset(object):\n",
    "    def __init__(self, file_path):\n",
    "        self.data = json.load(open(file_path, 'r', encoding='utf-8'))\n",
    "\n",
    "    def load(self):\n",
    "        data_list = []\n",
    "        for item in self.data:\n",
    "            data_list.append(item['original_text'] + '\\t' + item['correct_text'])\n",
    "            if len(data_list)>10000:\n",
    "                break\n",
    "        return {'text': data_list}\n",
    "\n",
    "import torch\n",
    "def bart_correct(tokenizer, model, text: str, max_length: int = 128):\n",
    "\n",
    "    import numpy as np\n",
    "    inputs = tokenizer.encode(text, padding=True, max_length=max_length, truncation=True,\n",
    "                                return_tensors='pt')\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        res = model(inputs).logits\n",
    "        res = np.argmax(res[0],axis=1)\n",
    "        res = res[1:-1]\n",
    "        decode_tokens = tokenizer.decode(res,skip_special_tokens=True).replace(' ', '')\n",
    "    return decode_tokens\n",
    "\n",
    "# Build HF Datasets for the train/dev splits (distinct names instead of\n",
    "# reusing throwaway variables `d` / `data_dict` across stages).\n",
    "train_corpus = CscDataset(\"./csc_sample/train.json\").load()\n",
    "train_dataset = Dataset.from_dict(train_corpus, split='train')\n",
    "\n",
    "valid_corpus = CscDataset(\"./csc_sample/dev.json\").load()\n",
    "valid_dataset = Dataset.from_dict(valid_corpus, split='test')\n",
    "logger.info(train_dataset)\n",
    "logger.info(valid_dataset)\n",
    "\n",
    "def tokenize_dataset(tokenizer, dataset, max_len):\n",
    "    \"\"\"Tokenize a dataset whose 'text' column holds tab-separated\n",
    "    source/target sentence pairs, producing input_ids / attention_mask /\n",
    "    labels / decoder_attention_mask columns padded to max_len.\"\"\"\n",
    "    def convert_to_features(example_batch):\n",
    "        # Split each line at the first tab only, so the target text may\n",
    "        # itself contain tabs.\n",
    "        src_texts = []\n",
    "        trg_texts = []\n",
    "        for example in example_batch['text']:\n",
    "            terms = example.split('\\t', 1)\n",
    "            src_texts.append(terms[0])\n",
    "            trg_texts.append(terms[1])\n",
    "        input_encodings = tokenizer.batch_encode_plus(\n",
    "            src_texts,\n",
    "            truncation=True,\n",
    "            padding='max_length',\n",
    "            max_length=max_len,\n",
    "        )\n",
    "        target_encodings = tokenizer.batch_encode_plus(\n",
    "            trg_texts,\n",
    "            truncation=True,\n",
    "            padding='max_length',\n",
    "            max_length=max_len,\n",
    "        )\n",
    "\n",
    "        return {\n",
    "            'input_ids': input_encodings['input_ids'],\n",
    "            'attention_mask': input_encodings['attention_mask'],\n",
    "            'target_ids': target_encodings['input_ids'],\n",
    "            'target_attention_mask': target_encodings['attention_mask']\n",
    "        }\n",
    "    dataset = dataset.map(convert_to_features, batched=True)\n",
    "    # Rename columns to the names the model's forward method expects.\n",
    "    dataset = dataset.rename_column('target_ids', 'labels')\n",
    "    dataset = dataset.rename_column('target_attention_mask', 'decoder_attention_mask')\n",
    "    dataset = dataset.remove_columns(['text'])\n",
    "    # BUG FIX: `with_format` returns a NEW dataset; the original call discarded\n",
    "    # the result, so the torch format was never applied. Apply it after the\n",
    "    # renames, using the final column names.\n",
    "    columns = ['input_ids', 'labels', 'attention_mask', 'decoder_attention_mask']\n",
    "    dataset = dataset.with_format(type='torch', columns=columns)\n",
    "    return dataset\n",
    "\n",
    "# Tokenize both splits with a fixed max sequence length of 128.\n",
    "train_data = tokenize_dataset(tokenizer, train_dataset,128)\n",
    "valid_data = tokenize_dataset(tokenizer, valid_dataset,128)\n",
    "# Quick check of the base model before fine-tuning.\n",
    "bart_correct(tokenizer, model,\"中国的首都是[MASK]京\",32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'中国的首都是杯京'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Base model on a real typo (杯 -> 北): recorded output shows it is left uncorrected.\n",
    "bart_correct(tokenizer, model,\"中国的首都是杯京\",32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import evaluate\n",
    "import numpy as np\n",
    "def compute_metrics(eval_preds):\n",
    "    \"\"\"Compute accuracy/F1 over argmax predictions.\n",
    "\n",
    "    NOTE(review): the GLUE/MRPC metric is designed for sentence-pair\n",
    "    classification; here it is applied to token ids -- confirm this is\n",
    "    the intended evaluation.\n",
    "    \"\"\"\n",
    "    # Cache the metric on the function so repeated eval calls do not\n",
    "    # re-load it (the original called evaluate.load on every invocation).\n",
    "    if not hasattr(compute_metrics, '_metric'):\n",
    "        compute_metrics._metric = evaluate.load(\"glue\", \"mrpc\")\n",
    "    logits, labels = eval_preds\n",
    "    predictions = np.argmax(logits, axis=-1)\n",
    "    return compute_metrics._metric.compute(predictions=predictions, references=labels)\n",
    "\n",
    "training_args = TrainingArguments(\n",
    "    output_dir='./results',          # where checkpoints/results are written\n",
    "    num_train_epochs=2,              # total number of training epochs\n",
    "    per_device_train_batch_size=12,  # batch size per device during training\n",
    "    per_device_eval_batch_size=12,   # batch size per device during evaluation\n",
    "    logging_dir='./logs/rn_log',     # directory for logs\n",
    "    learning_rate=1e-4,\n",
    "    # BUG FIX: `save_steps` expects an int/float, so `save_steps=False` was\n",
    "    # invalid. The stated intent (no checkpoints) is expressed via the\n",
    "    # save strategy instead.\n",
    "    save_strategy='no',\n",
    "    logging_steps=2,\n",
    "    eval_accumulation_steps=4,       # offload accumulated eval outputs every 4 steps\n",
    "    # use_cpu=True\n",
    ")\n",
    "\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=train_data,\n",
    "    eval_dataset=valid_data,\n",
    "    compute_metrics=compute_metrics\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "7d2dcc6c64644c189bad667584cf405d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/1668 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "o:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\models\\bart\\modeling_bart.py:597: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at ..\\aten\\src\\ATen\\native\\transformers\\cuda\\sdp_utils.cpp:263.)\n",
      "  attn_output = torch.nn.functional.scaled_dot_product_attention(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'loss': 8.0673, 'grad_norm': 42.19528579711914, 'learning_rate': 9.98800959232614e-05, 'epoch': 0.0}\n",
      "{'loss': 3.1424, 'grad_norm': 9.138588905334473, 'learning_rate': 9.976019184652279e-05, 'epoch': 0.0}\n",
      "{'loss': 2.127, 'grad_norm': 29.217750549316406, 'learning_rate': 9.964028776978419e-05, 'epoch': 0.01}\n",
      "{'loss': 0.1877, 'grad_norm': 3.653794527053833, 'learning_rate': 9.952038369304557e-05, 'epoch': 0.01}\n",
      "{'loss': 0.1963, 'grad_norm': 0.9644782543182373, 'learning_rate': 9.940047961630696e-05, 'epoch': 0.01}\n",
      "{'loss': 0.1428, 'grad_norm': 1.2566107511520386, 'learning_rate': 9.928057553956835e-05, 'epoch': 0.01}\n",
      "{'loss': 0.4613, 'grad_norm': 21.99432945251465, 'learning_rate': 9.916067146282975e-05, 'epoch': 0.02}\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[5], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m trainer\u001b[39m.\u001b[39;49mtrain()\n\u001b[0;32m      2\u001b[0m \u001b[39m##模型保存\u001b[39;00m\n\u001b[0;32m      3\u001b[0m model\u001b[39m.\u001b[39msave_pretrained(\u001b[39m\"\u001b[39m\u001b[39m./models/change/\u001b[39m\u001b[39m\"\u001b[39m)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:1885\u001b[0m, in \u001b[0;36mTrainer.train\u001b[1;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001b[0m\n\u001b[0;32m   1883\u001b[0m         hf_hub_utils\u001b[39m.\u001b[39menable_progress_bars()\n\u001b[0;32m   1884\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1885\u001b[0m     \u001b[39mreturn\u001b[39;00m inner_training_loop(\n\u001b[0;32m   1886\u001b[0m         args\u001b[39m=\u001b[39;49margs,\n\u001b[0;32m   1887\u001b[0m         resume_from_checkpoint\u001b[39m=\u001b[39;49mresume_from_checkpoint,\n\u001b[0;32m   1888\u001b[0m         trial\u001b[39m=\u001b[39;49mtrial,\n\u001b[0;32m   1889\u001b[0m         ignore_keys_for_eval\u001b[39m=\u001b[39;49mignore_keys_for_eval,\n\u001b[0;32m   1890\u001b[0m     )\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:2216\u001b[0m, in \u001b[0;36mTrainer._inner_training_loop\u001b[1;34m(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)\u001b[0m\n\u001b[0;32m   2213\u001b[0m     \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcontrol \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcallback_handler\u001b[39m.\u001b[39mon_step_begin(args, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcontrol)\n\u001b[0;32m   2215\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39maccelerator\u001b[39m.\u001b[39maccumulate(model):\n\u001b[1;32m-> 2216\u001b[0m     tr_loss_step \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mtraining_step(model, inputs)\n\u001b[0;32m   2218\u001b[0m \u001b[39mif\u001b[39;00m (\n\u001b[0;32m   2219\u001b[0m     args\u001b[39m.\u001b[39mlogging_nan_inf_filter\n\u001b[0;32m   2220\u001b[0m     \u001b[39mand\u001b[39;00m \u001b[39mnot\u001b[39;00m is_torch_xla_available()\n\u001b[0;32m   2221\u001b[0m     \u001b[39mand\u001b[39;00m (torch\u001b[39m.\u001b[39misnan(tr_loss_step) \u001b[39mor\u001b[39;00m torch\u001b[39m.\u001b[39misinf(tr_loss_step))\n\u001b[0;32m   2222\u001b[0m ):\n\u001b[0;32m   2223\u001b[0m     \u001b[39m# if loss is nan or inf simply add the average of previous logged losses\u001b[39;00m\n\u001b[0;32m   2224\u001b[0m     tr_loss \u001b[39m+\u001b[39m\u001b[39m=\u001b[39m tr_loss \u001b[39m/\u001b[39m (\u001b[39m1\u001b[39m \u001b[39m+\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mstate\u001b[39m.\u001b[39mglobal_step \u001b[39m-\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_globalstep_last_logged)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:3238\u001b[0m, in \u001b[0;36mTrainer.training_step\u001b[1;34m(self, model, inputs)\u001b[0m\n\u001b[0;32m   3235\u001b[0m     \u001b[39mreturn\u001b[39;00m loss_mb\u001b[39m.\u001b[39mreduce_mean()\u001b[39m.\u001b[39mdetach()\u001b[39m.\u001b[39mto(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39margs\u001b[39m.\u001b[39mdevice)\n\u001b[0;32m   3237\u001b[0m \u001b[39mwith\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcompute_loss_context_manager():\n\u001b[1;32m-> 3238\u001b[0m     loss \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mcompute_loss(model, inputs)\n\u001b[0;32m   3240\u001b[0m \u001b[39mdel\u001b[39;00m inputs\n\u001b[0;32m   3241\u001b[0m torch\u001b[39m.\u001b[39mcuda\u001b[39m.\u001b[39mempty_cache()\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:3264\u001b[0m, in \u001b[0;36mTrainer.compute_loss\u001b[1;34m(self, model, inputs, return_outputs)\u001b[0m\n\u001b[0;32m   3262\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m   3263\u001b[0m     labels \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n\u001b[1;32m-> 3264\u001b[0m outputs \u001b[39m=\u001b[39m model(\u001b[39m*\u001b[39m\u001b[39m*\u001b[39minputs)\n\u001b[0;32m   3265\u001b[0m \u001b[39m# Save past state if it exists\u001b[39;00m\n\u001b[0;32m   3266\u001b[0m \u001b[39m# TODO: this needs to be fixed and made cleaner later.\u001b[39;00m\n\u001b[0;32m   3267\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39margs\u001b[39m.\u001b[39mpast_index \u001b[39m>\u001b[39m\u001b[39m=\u001b[39m \u001b[39m0\u001b[39m:\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\torch\\nn\\modules\\module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1509\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)  \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m   1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1511\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\torch\\nn\\modules\\module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m   1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m   1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m   1518\u001b[0m         \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m   1519\u001b[0m         \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1520\u001b[0m     \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m   1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m   1523\u001b[0m     result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\models\\bart\\modeling_bart.py:1742\u001b[0m, in \u001b[0;36mBartForConditionalGeneration.forward\u001b[1;34m(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, head_mask, decoder_head_mask, cross_attn_head_mask, encoder_outputs, past_key_values, inputs_embeds, decoder_inputs_embeds, labels, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[0;32m   1737\u001b[0m     \u001b[39mif\u001b[39;00m decoder_input_ids \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m decoder_inputs_embeds \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m   1738\u001b[0m         decoder_input_ids \u001b[39m=\u001b[39m shift_tokens_right(\n\u001b[0;32m   1739\u001b[0m             labels, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mconfig\u001b[39m.\u001b[39mpad_token_id, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mconfig\u001b[39m.\u001b[39mdecoder_start_token_id\n\u001b[0;32m   1740\u001b[0m         )\n\u001b[1;32m-> 1742\u001b[0m outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mmodel(\n\u001b[0;32m   1743\u001b[0m     input_ids,\n\u001b[0;32m   1744\u001b[0m     attention_mask\u001b[39m=\u001b[39;49mattention_mask,\n\u001b[0;32m   1745\u001b[0m     decoder_input_ids\u001b[39m=\u001b[39;49mdecoder_input_ids,\n\u001b[0;32m   1746\u001b[0m     encoder_outputs\u001b[39m=\u001b[39;49mencoder_outputs,\n\u001b[0;32m   1747\u001b[0m     decoder_attention_mask\u001b[39m=\u001b[39;49mdecoder_attention_mask,\n\u001b[0;32m   1748\u001b[0m     head_mask\u001b[39m=\u001b[39;49mhead_mask,\n\u001b[0;32m   1749\u001b[0m     decoder_head_mask\u001b[39m=\u001b[39;49mdecoder_head_mask,\n\u001b[0;32m   1750\u001b[0m     cross_attn_head_mask\u001b[39m=\u001b[39;49mcross_attn_head_mask,\n\u001b[0;32m   1751\u001b[0m     
past_key_values\u001b[39m=\u001b[39;49mpast_key_values,\n\u001b[0;32m   1752\u001b[0m     inputs_embeds\u001b[39m=\u001b[39;49minputs_embeds,\n\u001b[0;32m   1753\u001b[0m     decoder_inputs_embeds\u001b[39m=\u001b[39;49mdecoder_inputs_embeds,\n\u001b[0;32m   1754\u001b[0m     use_cache\u001b[39m=\u001b[39;49muse_cache,\n\u001b[0;32m   1755\u001b[0m     output_attentions\u001b[39m=\u001b[39;49moutput_attentions,\n\u001b[0;32m   1756\u001b[0m     output_hidden_states\u001b[39m=\u001b[39;49moutput_hidden_states,\n\u001b[0;32m   1757\u001b[0m     return_dict\u001b[39m=\u001b[39;49mreturn_dict,\n\u001b[0;32m   1758\u001b[0m )\n\u001b[0;32m   1760\u001b[0m lm_logits \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlm_head(outputs[\u001b[39m0\u001b[39m])\n\u001b[0;32m   1761\u001b[0m lm_logits \u001b[39m=\u001b[39m lm_logits \u001b[39m+\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mfinal_logits_bias\u001b[39m.\u001b[39mto(lm_logits\u001b[39m.\u001b[39mdevice)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\torch\\nn\\modules\\module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1509\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)  \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m   1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1511\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\torch\\nn\\modules\\module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m   1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m   1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m   1518\u001b[0m         \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m   1519\u001b[0m         \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1520\u001b[0m     \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m   1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m   1523\u001b[0m     result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\models\\bart\\modeling_bart.py:1628\u001b[0m, in \u001b[0;36mBartModel.forward\u001b[1;34m(self, input_ids, attention_mask, decoder_input_ids, decoder_attention_mask, head_mask, decoder_head_mask, cross_attn_head_mask, encoder_outputs, past_key_values, inputs_embeds, decoder_inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[0;32m   1621\u001b[0m     encoder_outputs \u001b[39m=\u001b[39m BaseModelOutput(\n\u001b[0;32m   1622\u001b[0m         last_hidden_state\u001b[39m=\u001b[39mencoder_outputs[\u001b[39m0\u001b[39m],\n\u001b[0;32m   1623\u001b[0m         hidden_states\u001b[39m=\u001b[39mencoder_outputs[\u001b[39m1\u001b[39m] \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(encoder_outputs) \u001b[39m>\u001b[39m \u001b[39m1\u001b[39m \u001b[39melse\u001b[39;00m \u001b[39mNone\u001b[39;00m,\n\u001b[0;32m   1624\u001b[0m         attentions\u001b[39m=\u001b[39mencoder_outputs[\u001b[39m2\u001b[39m] \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(encoder_outputs) \u001b[39m>\u001b[39m \u001b[39m2\u001b[39m \u001b[39melse\u001b[39;00m \u001b[39mNone\u001b[39;00m,\n\u001b[0;32m   1625\u001b[0m     )\n\u001b[0;32m   1627\u001b[0m \u001b[39m# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\u001b[39;00m\n\u001b[1;32m-> 1628\u001b[0m decoder_outputs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mdecoder(\n\u001b[0;32m   1629\u001b[0m     input_ids\u001b[39m=\u001b[39;49mdecoder_input_ids,\n\u001b[0;32m   1630\u001b[0m     attention_mask\u001b[39m=\u001b[39;49mdecoder_attention_mask,\n\u001b[0;32m   1631\u001b[0m     encoder_hidden_states\u001b[39m=\u001b[39;49mencoder_outputs[\u001b[39m0\u001b[39;49m],\n\u001b[0;32m   1632\u001b[0m     encoder_attention_mask\u001b[39m=\u001b[39;49mattention_mask,\n\u001b[0;32m   1633\u001b[0m     
head_mask\u001b[39m=\u001b[39;49mdecoder_head_mask,\n\u001b[0;32m   1634\u001b[0m     cross_attn_head_mask\u001b[39m=\u001b[39;49mcross_attn_head_mask,\n\u001b[0;32m   1635\u001b[0m     past_key_values\u001b[39m=\u001b[39;49mpast_key_values,\n\u001b[0;32m   1636\u001b[0m     inputs_embeds\u001b[39m=\u001b[39;49mdecoder_inputs_embeds,\n\u001b[0;32m   1637\u001b[0m     use_cache\u001b[39m=\u001b[39;49muse_cache,\n\u001b[0;32m   1638\u001b[0m     output_attentions\u001b[39m=\u001b[39;49moutput_attentions,\n\u001b[0;32m   1639\u001b[0m     output_hidden_states\u001b[39m=\u001b[39;49moutput_hidden_states,\n\u001b[0;32m   1640\u001b[0m     return_dict\u001b[39m=\u001b[39;49mreturn_dict,\n\u001b[0;32m   1641\u001b[0m )\n\u001b[0;32m   1643\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m return_dict:\n\u001b[0;32m   1644\u001b[0m     \u001b[39mreturn\u001b[39;00m decoder_outputs \u001b[39m+\u001b[39m encoder_outputs\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\torch\\nn\\modules\\module.py:1511\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1509\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_compiled_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)  \u001b[39m# type: ignore[misc]\u001b[39;00m\n\u001b[0;32m   1510\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[1;32m-> 1511\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_call_impl(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\torch\\nn\\modules\\module.py:1520\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m   1515\u001b[0m \u001b[39m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[0;32m   1516\u001b[0m \u001b[39m# this function, and just call forward.\u001b[39;00m\n\u001b[0;32m   1517\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mnot\u001b[39;00m (\u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_backward_pre_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_hooks \u001b[39mor\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_forward_pre_hooks\n\u001b[0;32m   1518\u001b[0m         \u001b[39mor\u001b[39;00m _global_backward_pre_hooks \u001b[39mor\u001b[39;00m _global_backward_hooks\n\u001b[0;32m   1519\u001b[0m         \u001b[39mor\u001b[39;00m _global_forward_hooks \u001b[39mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[1;32m-> 1520\u001b[0m     \u001b[39mreturn\u001b[39;00m forward_call(\u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs)\n\u001b[0;32m   1522\u001b[0m \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m   1523\u001b[0m     result \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\models\\bart\\modeling_bart.py:1394\u001b[0m, in \u001b[0;36mBartDecoder.forward\u001b[1;34m(self, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask, cross_attn_head_mask, past_key_values, inputs_embeds, use_cache, output_attentions, output_hidden_states, return_dict)\u001b[0m\n\u001b[0;32m   1390\u001b[0m     attention_mask \u001b[39m=\u001b[39m attention_mask \u001b[39mif\u001b[39;00m (attention_mask \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mand\u001b[39;00m \u001b[39m0\u001b[39m \u001b[39min\u001b[39;00m attention_mask) \u001b[39melse\u001b[39;00m \u001b[39mNone\u001b[39;00m\n\u001b[0;32m   1391\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_use_sdpa \u001b[39mand\u001b[39;00m \u001b[39mnot\u001b[39;00m output_attentions \u001b[39mand\u001b[39;00m cross_attn_head_mask \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m   1392\u001b[0m     \u001b[39m# output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on\u001b[39;00m\n\u001b[0;32m   1393\u001b[0m     \u001b[39m# the manual implementation that requires a 4D causal mask in all cases.\u001b[39;00m\n\u001b[1;32m-> 1394\u001b[0m     attention_mask \u001b[39m=\u001b[39m _prepare_4d_causal_attention_mask_for_sdpa(\n\u001b[0;32m   1395\u001b[0m         attention_mask,\n\u001b[0;32m   1396\u001b[0m         input_shape,\n\u001b[0;32m   1397\u001b[0m         inputs_embeds,\n\u001b[0;32m   1398\u001b[0m         past_key_values_length,\n\u001b[0;32m   1399\u001b[0m     )\n\u001b[0;32m   1400\u001b[0m \u001b[39melse\u001b[39;00m:\n\u001b[0;32m   1401\u001b[0m     \u001b[39m# 4d mask is passed through the layers\u001b[39;00m\n\u001b[0;32m   1402\u001b[0m     attention_mask \u001b[39m=\u001b[39m 
_prepare_4d_causal_attention_mask(\n\u001b[0;32m   1403\u001b[0m         attention_mask, input_shape, inputs_embeds, past_key_values_length\n\u001b[0;32m   1404\u001b[0m     )\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\modeling_attn_mask_utils.py:372\u001b[0m, in \u001b[0;36m_prepare_4d_causal_attention_mask_for_sdpa\u001b[1;34m(attention_mask, input_shape, inputs_embeds, past_key_values_length, sliding_window)\u001b[0m\n\u001b[0;32m    363\u001b[0m \u001b[39m# torch.jit.trace, symbolic_trace and torchdynamo with fullgraph=True are unable to capture the controlflow `is_causal=attention_mask is None and q_len > 1`\u001b[39;00m\n\u001b[0;32m    364\u001b[0m \u001b[39m# used as an SDPA argument. We keep compatibility with these tracing tools by always using SDPA's `attn_mask` argument in case we are tracing.\u001b[39;00m\n\u001b[0;32m    365\u001b[0m \u001b[39m# TODO: For dynamo, rather use a check on fullgraph=True once this is possible (https://github.com/pytorch/pytorch/pull/120400).\u001b[39;00m\n\u001b[0;32m    366\u001b[0m is_tracing \u001b[39m=\u001b[39m (\n\u001b[0;32m    367\u001b[0m     torch\u001b[39m.\u001b[39mjit\u001b[39m.\u001b[39mis_tracing()\n\u001b[0;32m    368\u001b[0m     \u001b[39mor\u001b[39;00m \u001b[39misinstance\u001b[39m(inputs_embeds, torch\u001b[39m.\u001b[39mfx\u001b[39m.\u001b[39mProxy)\n\u001b[0;32m    369\u001b[0m     \u001b[39mor\u001b[39;00m (\u001b[39mhasattr\u001b[39m(torch, \u001b[39m\"\u001b[39m\u001b[39m_dynamo\u001b[39m\u001b[39m\"\u001b[39m) \u001b[39mand\u001b[39;00m torch\u001b[39m.\u001b[39m_dynamo\u001b[39m.\u001b[39mis_compiling())\n\u001b[0;32m    370\u001b[0m )\n\u001b[1;32m--> 372\u001b[0m ignore_causal_mask \u001b[39m=\u001b[39m AttentionMaskConverter\u001b[39m.\u001b[39;49m_ignore_causal_mask_sdpa(\n\u001b[0;32m    373\u001b[0m     attention_mask\u001b[39m=\u001b[39;49mattention_mask,\n\u001b[0;32m    374\u001b[0m     inputs_embeds\u001b[39m=\u001b[39;49minputs_embeds,\n\u001b[0;32m    375\u001b[0m     past_key_values_length\u001b[39m=\u001b[39;49mpast_key_values_length,\n\u001b[0;32m    376\u001b[0m     
sliding_window\u001b[39m=\u001b[39;49msliding_window,\n\u001b[0;32m    377\u001b[0m )\n\u001b[0;32m    379\u001b[0m \u001b[39mif\u001b[39;00m ignore_causal_mask:\n\u001b[0;32m    380\u001b[0m     expanded_4d_mask \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\modeling_attn_mask_utils.py:279\u001b[0m, in \u001b[0;36mAttentionMaskConverter._ignore_causal_mask_sdpa\u001b[1;34m(attention_mask, inputs_embeds, past_key_values_length, sliding_window, is_training)\u001b[0m\n\u001b[0;32m    277\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(attention_mask\u001b[39m.\u001b[39mshape) \u001b[39m==\u001b[39m \u001b[39m4\u001b[39m:\n\u001b[0;32m    278\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mFalse\u001b[39;00m\n\u001b[1;32m--> 279\u001b[0m \u001b[39melif\u001b[39;00m (is_training \u001b[39mor\u001b[39;00m \u001b[39mnot\u001b[39;00m is_tracing) \u001b[39mand\u001b[39;00m torch\u001b[39m.\u001b[39mall(attention_mask \u001b[39m==\u001b[39m \u001b[39m1\u001b[39m):\n\u001b[0;32m    280\u001b[0m     \u001b[39mif\u001b[39;00m query_length \u001b[39m==\u001b[39m \u001b[39m1\u001b[39m \u001b[39mor\u001b[39;00m key_value_length \u001b[39m==\u001b[39m query_length:\n\u001b[0;32m    281\u001b[0m         \u001b[39m# For query_length == 1, causal attention and bi-directional attention are the same.\u001b[39;00m\n\u001b[0;32m    282\u001b[0m         ignore_causal_mask \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "### 训练\n",
    "trainer.train()\n",
    "##模型保存\n",
    "model.save_pretrained(\"./models/change/\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "new_model = BartForConditionalGeneration.from_pretrained(\"./models/change/\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'中国的首都是杯京'"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer, new_model, \"中国的首都是杯京\", 32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'中国的首都是北京'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer, new_model, \"中国的首都是[MASK]京\", 32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'吃了早饭以后她去上课。'"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer,new_model,\"吃了早菜以后他去上课。\", 32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'他们看了一个很可爱的电影，一个小机一个人去火星。'"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer,new_model,\"他们看了一个很可爱的电影，一个小机一个人去火星。\", 32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'她是个好女孩'"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer,new_model,\"ta是个好女孩\", 32)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'电子学习工具有许多，比如笔记本电脑、触控板、平板等等。'"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer,new_model,\"电子学习工具有许昌，比如笔记本电脑、触控版、平板等等。\", 128)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'桂林市是世界闻名<mask>，它有悠久的<mask>'"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bart_correct(tokenizer,new_model,\"桂林市是世界闻名<mask> ，它有悠久的<mask>\", 128)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "ename": "AttributeError",
     "evalue": "'NoneType' object has no attribute 'get'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[3], line 17\u001b[0m\n\u001b[0;32m     13\u001b[0m \u001b[39mfrom\u001b[39;00m \u001b[39mpycorrector\u001b[39;00m \u001b[39mimport\u001b[39;00m eval_sighan2015_by_model_batch\n\u001b[0;32m     15\u001b[0m \u001b[39m# valid_dataset = valid_dataset.shuffle(42).select(range(100))\u001b[39;00m\n\u001b[0;32m     16\u001b[0m \u001b[39m# logger.info(valid_dataset)\u001b[39;00m\n\u001b[1;32m---> 17\u001b[0m eval_sighan2015_by_model_batch(trainer\u001b[39m.\u001b[39;49mpredict(valid_dataset))\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:3648\u001b[0m, in \u001b[0;36mTrainer.predict\u001b[1;34m(self, test_dataset, ignore_keys, metric_key_prefix)\u001b[0m\n\u001b[0;32m   3645\u001b[0m start_time \u001b[39m=\u001b[39m time\u001b[39m.\u001b[39mtime()\n\u001b[0;32m   3647\u001b[0m eval_loop \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mprediction_loop \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39margs\u001b[39m.\u001b[39muse_legacy_prediction_loop \u001b[39melse\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mevaluation_loop\n\u001b[1;32m-> 3648\u001b[0m output \u001b[39m=\u001b[39m eval_loop(\n\u001b[0;32m   3649\u001b[0m     test_dataloader, description\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mPrediction\u001b[39;49m\u001b[39m\"\u001b[39;49m, ignore_keys\u001b[39m=\u001b[39;49mignore_keys, metric_key_prefix\u001b[39m=\u001b[39;49mmetric_key_prefix\n\u001b[0;32m   3650\u001b[0m )\n\u001b[0;32m   3651\u001b[0m total_batch_size \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39margs\u001b[39m.\u001b[39meval_batch_size \u001b[39m*\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39margs\u001b[39m.\u001b[39mworld_size\n\u001b[0;32m   3652\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39m{\u001b[39;00mmetric_key_prefix\u001b[39m}\u001b[39;00m\u001b[39m_jit_compilation_time\u001b[39m\u001b[39m\"\u001b[39m \u001b[39min\u001b[39;00m output\u001b[39m.\u001b[39mmetrics:\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:3757\u001b[0m, in \u001b[0;36mTrainer.evaluation_loop\u001b[1;34m(self, dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix)\u001b[0m\n\u001b[0;32m   3754\u001b[0m         batch_size \u001b[39m=\u001b[39m observed_batch_size\n\u001b[0;32m   3756\u001b[0m \u001b[39m# Prediction step\u001b[39;00m\n\u001b[1;32m-> 3757\u001b[0m loss, logits, labels \u001b[39m=\u001b[39m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mprediction_step(model, inputs, prediction_loss_only, ignore_keys\u001b[39m=\u001b[39;49mignore_keys)\n\u001b[0;32m   3758\u001b[0m main_input_name \u001b[39m=\u001b[39m \u001b[39mgetattr\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mmodel, \u001b[39m\"\u001b[39m\u001b[39mmain_input_name\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39minput_ids\u001b[39m\u001b[39m\"\u001b[39m)\n\u001b[0;32m   3759\u001b[0m inputs_decode \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_prepare_input(inputs[main_input_name]) \u001b[39mif\u001b[39;00m args\u001b[39m.\u001b[39minclude_inputs_for_metrics \u001b[39melse\u001b[39;00m \u001b[39mNone\u001b[39;00m\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:3924\u001b[0m, in \u001b[0;36mTrainer.prediction_step\u001b[1;34m(self, model, inputs, prediction_loss_only, ignore_keys)\u001b[0m\n\u001b[0;32m   3894\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mprediction_step\u001b[39m(\n\u001b[0;32m   3895\u001b[0m     \u001b[39mself\u001b[39m,\n\u001b[0;32m   3896\u001b[0m     model: nn\u001b[39m.\u001b[39mModule,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   3899\u001b[0m     ignore_keys: Optional[List[\u001b[39mstr\u001b[39m]] \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m,\n\u001b[0;32m   3900\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Tuple[Optional[torch\u001b[39m.\u001b[39mTensor], Optional[torch\u001b[39m.\u001b[39mTensor], Optional[torch\u001b[39m.\u001b[39mTensor]]:\n\u001b[0;32m   3901\u001b[0m \u001b[39m    \u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m   3902\u001b[0m \u001b[39m    Perform an evaluation step on `model` using `inputs`.\u001b[39;00m\n\u001b[0;32m   3903\u001b[0m \n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   3922\u001b[0m \u001b[39m        logits and labels (each being optional).\u001b[39;00m\n\u001b[0;32m   3923\u001b[0m \u001b[39m    \"\"\"\u001b[39;00m\n\u001b[1;32m-> 3924\u001b[0m     has_labels \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlabel_names) \u001b[39m==\u001b[39m \u001b[39m0\u001b[39m \u001b[39melse\u001b[39;00m \u001b[39mall\u001b[39;49m(inputs\u001b[39m.\u001b[39;49mget(k) \u001b[39mis\u001b[39;49;00m \u001b[39mnot\u001b[39;49;00m \u001b[39mNone\u001b[39;49;00m \u001b[39mfor\u001b[39;49;00m k \u001b[39min\u001b[39;49;00m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49mlabel_names)\n\u001b[0;32m   3925\u001b[0m     \u001b[39m# For CLIP-like models capable of returning loss values.\u001b[39;00m\n\u001b[0;32m   3926\u001b[0m     \u001b[39m# If 
`return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`\u001b[39;00m\n\u001b[0;32m   3927\u001b[0m     \u001b[39m# is `True` in `model.forward`.\u001b[39;00m\n\u001b[0;32m   3928\u001b[0m     return_loss \u001b[39m=\u001b[39m inputs\u001b[39m.\u001b[39mget(\u001b[39m\"\u001b[39m\u001b[39mreturn_loss\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m)\n",
      "File \u001b[1;32mo:\\conda_envs\\env_DeepLearning_PyTorch-1.13.1\\lib\\site-packages\\transformers\\trainer.py:3924\u001b[0m, in \u001b[0;36m<genexpr>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m   3894\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mprediction_step\u001b[39m(\n\u001b[0;32m   3895\u001b[0m     \u001b[39mself\u001b[39m,\n\u001b[0;32m   3896\u001b[0m     model: nn\u001b[39m.\u001b[39mModule,\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   3899\u001b[0m     ignore_keys: Optional[List[\u001b[39mstr\u001b[39m]] \u001b[39m=\u001b[39m \u001b[39mNone\u001b[39;00m,\n\u001b[0;32m   3900\u001b[0m ) \u001b[39m-\u001b[39m\u001b[39m>\u001b[39m Tuple[Optional[torch\u001b[39m.\u001b[39mTensor], Optional[torch\u001b[39m.\u001b[39mTensor], Optional[torch\u001b[39m.\u001b[39mTensor]]:\n\u001b[0;32m   3901\u001b[0m \u001b[39m    \u001b[39m\u001b[39m\"\"\"\u001b[39;00m\n\u001b[0;32m   3902\u001b[0m \u001b[39m    Perform an evaluation step on `model` using `inputs`.\u001b[39;00m\n\u001b[0;32m   3903\u001b[0m \n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m   3922\u001b[0m \u001b[39m        logits and labels (each being optional).\u001b[39;00m\n\u001b[0;32m   3923\u001b[0m \u001b[39m    \"\"\"\u001b[39;00m\n\u001b[1;32m-> 3924\u001b[0m     has_labels \u001b[39m=\u001b[39m \u001b[39mFalse\u001b[39;00m \u001b[39mif\u001b[39;00m \u001b[39mlen\u001b[39m(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlabel_names) \u001b[39m==\u001b[39m \u001b[39m0\u001b[39m \u001b[39melse\u001b[39;00m \u001b[39mall\u001b[39m(inputs\u001b[39m.\u001b[39;49mget(k) \u001b[39mis\u001b[39;00m \u001b[39mnot\u001b[39;00m \u001b[39mNone\u001b[39;00m \u001b[39mfor\u001b[39;00m k \u001b[39min\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mlabel_names)\n\u001b[0;32m   3925\u001b[0m     \u001b[39m# For CLIP-like models capable of returning loss values.\u001b[39;00m\n\u001b[0;32m   3926\u001b[0m     \u001b[39m# If `return_loss` is not specified or being `None` in `inputs`, we check if the default value 
of `return_loss`\u001b[39;00m\n\u001b[0;32m   3927\u001b[0m     \u001b[39m# is `True` in `model.forward`.\u001b[39;00m\n\u001b[0;32m   3928\u001b[0m     return_loss \u001b[39m=\u001b[39m inputs\u001b[39m.\u001b[39mget(\u001b[39m\"\u001b[39m\u001b[39mreturn_loss\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39mNone\u001b[39;00m)\n",
      "\u001b[1;31mAttributeError\u001b[0m: 'NoneType' object has no attribute 'get'"
     ]
    }
   ],
   "source": [
    "### 试图评估，但error\n",
    "# # import evaluate\n",
    "# # module_type 默认为 'metric'\n",
    "# accuracy = evaluate.load(\"accuracy\")\n",
    "# # data = load_dataset(\"imdb\", split=\"test\").shuffle().select(range(1000))\n",
    "# metric = evaluate.load(\"accuracy\")\n",
    "# results = accuracy.compute(model_or_pipeline=model, data=valid_data, metric=metric,\n",
    "#                        label_mapping={\"NEGATIVE\": 0, \"POSITIVE\": 1},\n",
    "#                        strategy=\"bootstrap\", n_resamples=200)\n",
    "\n",
    "# print(results)\n",
    "\n",
    "# trainer.evaluate()\n",
    "from pycorrector import eval_sighan2015_by_model_batch\n",
    "\n",
    "# valid_dataset = valid_dataset.shuffle(42).select(range(100))\n",
    "# logger.info(valid_dataset)\n",
    "eval_sighan2015_by_model_batch(trainer.predict(valid_data))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "env_DeepLearning_PyTorch-1.13.1",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "20ff74ba87f01ad54d10aa54b89201a0a9136d24594b8998b4fa90e9a77eabca"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
