{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3b2e6ab7",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "PreTrainedTokenizer(name_or_path='Helsinki-NLP/opus-mt-en-ro', vocab_size=59543, model_max_len=512, is_fast=False, padding_side='right', truncation_side='right', special_tokens={'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'})\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'input_ids': [[125, 778, 3, 63, 141, 9191, 23, 187, 32, 716, 9191, 2, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from transformers import AutoTokenizer\n",
    "\n",
    "# Load the tokenizer\n",
    "# NOTE(review): use_fast=True is requested, but the printed repr reports is_fast=False\n",
    "tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-ro',\n",
    "                                          use_fast=True)\n",
    "\n",
    "print(tokenizer)\n",
    "\n",
    "# Trial encoding on a sample sentence pair\n",
    "tokenizer.batch_encode_plus(\n",
    "    [['Hello, this one sentence!', 'This is another sentence.']])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "69c480e7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached shuffled indices for dataset at datas/wmt16/ro-en/train/cache-91525599c6b01037.arrow\n",
      "Loading cached shuffled indices for dataset at datas/wmt16/ro-en/validation/cache-4a013ce783f1228a.arrow\n",
      "Loading cached shuffled indices for dataset at datas/wmt16/ro-en/test/cache-c5fe17364be6807b.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/train/cache-1075e0b8ac206327.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/train/cache-9fe8e553b9907402.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/train/cache-fc42acdf7f6ebd0d.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/train/cache-196974f9ef2169b9.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/validation/cache-5906b054d2fcfe3c.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/validation/cache-fd904127e528ee0c.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/validation/cache-4e6ed6815ec94f7a.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/validation/cache-7154642fbead0298.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/test/cache-905606071ab5b8be.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/test/cache-75c11d7064f6a327.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/test/cache-b1c2b329c4e2d34e.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Loading cached processed dataset at datas/wmt16/ro-en/test/cache-8670277652e40452.arrow\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': [460, 354, 3794, 12, 10677, 20, 5046, 14, 4, 2546, 37, 8, 397, 5551, 30, 10113, 37, 3501, 19814, 18, 8465, 20, 4, 44690, 782, 2, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': [902, 576, 2946, 76, 10815, 17, 5098, 14997, 5, 559, 1140, 43, 2434, 6624, 27, 50, 337, 19216, 46, 22174, 17, 2317, 121, 16825, 2, 0]}\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 20000\n",
       "    })\n",
       "    validation: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 200\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'labels'],\n",
       "        num_rows: 200\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from datasets import load_dataset, load_from_disk\n",
    "\n",
    "# Load the dataset (local copy of wmt16 ro-en)\n",
    "#dataset = load_dataset(path='wmt16', name='ro-en')\n",
    "dataset = load_from_disk('datas/wmt16/ro-en')\n",
    "\n",
    "# Subsample: the full dataset is too large to train on here\n",
    "dataset['train'] = dataset['train'].shuffle(1).select(range(20000))\n",
    "dataset['validation'] = dataset['validation'].shuffle(1).select(range(200))\n",
    "dataset['test'] = dataset['test'].shuffle(1).select(range(200))\n",
    "\n",
    "\n",
    "# Preprocessing: tokenize source (en) and target (ro) sentences\n",
    "def preprocess_function(data):\n",
    "    # Pull out the en and ro sides of each translation pair\n",
    "    en = [ex['en'] for ex in data['translation']]\n",
    "    ro = [ex['ro'] for ex in data['translation']]\n",
    "\n",
    "    # The source language can be encoded directly\n",
    "    data = tokenizer.batch_encode_plus(en, max_length=128, truncation=True)\n",
    "\n",
    "    # The target language is encoded within the target-tokenizer context\n",
    "    with tokenizer.as_target_tokenizer():\n",
    "        data['labels'] = tokenizer.batch_encode_plus(\n",
    "            ro, max_length=128, truncation=True)['input_ids']\n",
    "\n",
    "    return data\n",
    "\n",
    "\n",
    "dataset = dataset.map(function=preprocess_function,\n",
    "                      batched=True,\n",
    "                      batch_size=1000,\n",
    "                      num_proc=4,\n",
    "                      remove_columns=['translation'])\n",
    "\n",
    "print(dataset['train'][0])\n",
    "\n",
    "dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3c1f61fb",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[59542, 10455,   120,    80],\n",
       "        [59542,   301,    53,  4074]])"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# This function is intended to be equivalent to the utility class below,\n",
    "# but it was written by imitation, so there may be small differences\n",
    "#from transformers import DataCollatorForSeq2Seq\n",
    "#DataCollatorForSeq2Seq(tokenizer, model=model)\n",
    "\n",
    "import torch\n",
    "\n",
    "\n",
    "# Collate function: pads a list of feature dicts into batched tensors\n",
    "def collate_fn(data):\n",
    "    # Length of the longest label sequence in the batch\n",
    "    max_length = max([len(i['labels']) for i in data])\n",
    "\n",
    "    # Pad every label list to max_length with -100 (ignored by the loss)\n",
    "    for i in data:\n",
    "        pads = [-100] * (max_length - len(i['labels']))\n",
    "        i['labels'] = i['labels'] + pads\n",
    "\n",
    "    # Pad input_ids/attention_mask and convert everything to tensors\n",
    "    data = tokenizer.pad(\n",
    "        encoded_inputs=data,\n",
    "        padding=True,\n",
    "        max_length=None,\n",
    "        pad_to_multiple_of=None,\n",
    "        return_tensors='pt',\n",
    "    )\n",
    "\n",
    "    # decoder_input_ids = labels shifted one step right, starting with <pad>\n",
    "    data['decoder_input_ids'] = torch.full_like(data['labels'],\n",
    "                                                tokenizer.get_vocab()['<pad>'],\n",
    "                                                dtype=torch.long)\n",
    "    data['decoder_input_ids'][:, 1:] = data['labels'][:, :-1]\n",
    "    # Replace the -100 padding carried over from labels with the real <pad> id\n",
    "    data['decoder_input_ids'][data['decoder_input_ids'] ==\n",
    "                              -100] = tokenizer.get_vocab()['<pad>']\n",
    "\n",
    "    return data\n",
    "\n",
    "\n",
    "# Smoke test with two hand-made examples\n",
    "data = [{\n",
    "    'input_ids': [21603, 10, 37, 3719, 13],\n",
    "    'attention_mask': [1, 1, 1, 1, 1],\n",
    "    'labels': [10455, 120, 80]\n",
    "}, {\n",
    "    'input_ids': [21603, 10, 7086, 8408, 563],\n",
    "    'attention_mask': [1, 1, 1, 1, 1],\n",
    "    'labels': [301, 53, 4074, 1669]\n",
    "}]\n",
    "\n",
    "collate_fn(data)['decoder_input_ids']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "0ad1647a",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input_ids torch.Size([8, 51]) tensor([[  363,    63,    32,    51,   154,  1574,  5352,    14,     4,  2196,\n",
      "            14,   456,     8,  3562,    18,  1603,     4,  2196,   123, 16109,\n",
      "         23241,   350,  1994,     2,     0, 59542, 59542, 59542, 59542, 59542,\n",
      "         59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542,\n",
      "         59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542,\n",
      "         59542],\n",
      "        [ 7429,     7,    11, 12663,    35, 21169,  6268,    40,  1289, 17749,\n",
      "            56,   682,   198, 39728,    13,    47, 14297,     3,  1571,    45,\n",
      "          1834, 37194,    37, 10567,    13,     4,  9307,  1080,  6677, 32510,\n",
      "             7,  4608,    40, 42822,  1084,   340,   193,     4, 13310,  1000,\n",
      "           174,   183,  4944,    37,   311,  1634,   439,     2,     0, 59542,\n",
      "         59542]])\n",
      "attention_mask torch.Size([8, 51]) tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n",
      "         0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n",
      "         1, 0, 0]])\n",
      "labels torch.Size([8, 69]) tensor([[ 1962,   764,    29, 17656,    27,  2992, 11095,     8, 30002,   411,\n",
      "           556,     5,  1513,    46,   178,   252,  2196,   118, 31274,     3,\n",
      "          7562,     3,  2760,    93,    17,   780,     2,     0,  -100,  -100,\n",
      "          -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,\n",
      "          -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,\n",
      "          -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,\n",
      "          -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100,  -100],\n",
      "        [ 2750,   412,  9276,   258, 12663,    44,  9343,    40,  1289, 17749,\n",
      "            56,     8,  4906,    36,  7982,    29, 10616,  1756,   671,    39,\n",
      "             8,    73, 19473,    49,     3, 14761, 36499,   180,     9,   414,\n",
      "            38, 39494,    72,  6987,   230,  1640,    39, 21709,   470,    35,\n",
      "            28,  6724,    62, 11886,    22,  5838,    44,  9307,  1080,  6677,\n",
      "            40,   658,  1084,  1084,   340,  6724,    28,    43,  2886, 12316,\n",
      "            59,  3517,     5,   616,  1634,     5,   343,     2,     0]])\n",
      "decoder_input_ids torch.Size([8, 69]) tensor([[59542,  1962,   764,    29, 17656,    27,  2992, 11095,     8, 30002,\n",
      "           411,   556,     5,  1513,    46,   178,   252,  2196,   118, 31274,\n",
      "             3,  7562,     3,  2760,    93,    17,   780,     2,     0, 59542,\n",
      "         59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542,\n",
      "         59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542,\n",
      "         59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542,\n",
      "         59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542, 59542],\n",
      "        [59542,  2750,   412,  9276,   258, 12663,    44,  9343,    40,  1289,\n",
      "         17749,    56,     8,  4906,    36,  7982,    29, 10616,  1756,   671,\n",
      "            39,     8,    73, 19473,    49,     3, 14761, 36499,   180,     9,\n",
      "           414,    38, 39494,    72,  6987,   230,  1640,    39, 21709,   470,\n",
      "            35,    28,  6724,    62, 11886,    22,  5838,    44,  9307,  1080,\n",
      "          6677,    40,   658,  1084,  1084,   340,  6724,    28,    43,  2886,\n",
      "         12316,    59,  3517,     5,   616,  1634,     5,   343,     2]])\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "2500"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "# Data loader over the training split\n",
    "loader = torch.utils.data.DataLoader(\n",
    "    dataset=dataset['train'],\n",
    "    batch_size=8,\n",
    "    collate_fn=collate_fn,\n",
    "    shuffle=True,\n",
    "    drop_last=True,\n",
    ")\n",
    "\n",
    "# Grab a single batch for inspection\n",
    "for i, data in enumerate(loader):\n",
    "    break\n",
    "\n",
    "for k, v in data.items():\n",
    "    print(k, v.shape, v[:2])\n",
    "\n",
    "len(loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d9488731",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at Helsinki-NLP/opus-mt-en-ro were not used when initializing MarianModel: ['final_logits_bias']\n",
      "- This IS expected if you are initializing MarianModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing MarianModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10563.4816\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(tensor(1.5629, grad_fn=<NllLossBackward0>), torch.Size([8, 69, 59543]))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from transformers import AutoModelForSeq2SeqLM, MarianModel\n",
    "\n",
    "# Load the model\n",
    "#model = AutoModelForSeq2SeqLM.from_pretrained('Helsinki-NLP/opus-mt-en-ro')\n",
    "\n",
    "\n",
    "# Downstream task model: Marian encoder-decoder backbone + our own LM head\n",
    "class Model(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.pretrained = MarianModel.from_pretrained(\n",
    "            'Helsinki-NLP/opus-mt-en-ro')\n",
    "\n",
    "        # NOTE(review): initialized to zeros; the checkpoint's own\n",
    "        # 'final_logits_bias' (reported unused at load time) is not copied in\n",
    "        self.register_buffer('final_logits_bias',\n",
    "                             torch.zeros(1, tokenizer.vocab_size))\n",
    "\n",
    "        # LM head projecting hidden states (512) onto the vocabulary\n",
    "        self.fc = torch.nn.Linear(512, tokenizer.vocab_size, bias=False)\n",
    "\n",
    "        # Load the LM-head weights from the pretrained seq2seq model\n",
    "        parameters = AutoModelForSeq2SeqLM.from_pretrained(\n",
    "            'Helsinki-NLP/opus-mt-en-ro')\n",
    "        self.fc.load_state_dict(parameters.lm_head.state_dict())\n",
    "\n",
    "        # -100 label positions are skipped via CrossEntropyLoss's default ignore_index\n",
    "        self.criterion = torch.nn.CrossEntropyLoss()\n",
    "\n",
    "    def forward(self, input_ids, attention_mask, labels, decoder_input_ids):\n",
    "        logits = self.pretrained(input_ids=input_ids,\n",
    "                                 attention_mask=attention_mask,\n",
    "                                 decoder_input_ids=decoder_input_ids)\n",
    "        logits = logits.last_hidden_state\n",
    "\n",
    "        logits = self.fc(logits) + self.final_logits_bias\n",
    "\n",
    "        # Flatten (batch, seq) into one axis for token-level cross entropy\n",
    "        loss = self.criterion(logits.flatten(end_dim=1), labels.flatten())\n",
    "\n",
    "        return {'loss': loss, 'logits': logits}\n",
    "\n",
    "\n",
    "model = Model()\n",
    "\n",
    "# Parameter count (in units of 10k)\n",
    "print(sum(i.numel() for i in model.parameters()) / 10000)\n",
    "\n",
    "out = model(**data)\n",
    "\n",
    "out['loss'], out['logits'].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "78f3263f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'score': 0.0,\n",
       " 'counts': [4, 2, 0, 0],\n",
       " 'totals': [4, 2, 0, 0],\n",
       " 'precisions': [100.0, 100.0, 0.0, 0.0],\n",
       " 'bp': 1.0,\n",
       " 'sys_len': 4,\n",
       " 'ref_len': 4}"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from datasets import load_metric\n",
    "\n",
    "# Load the evaluation metric (sacrebleu)\n",
    "metric = load_metric(path='sacrebleu')\n",
    "\n",
    "# Trial computation on a perfect match\n",
    "metric.compute(predictions=['hello there', 'general kenobi'],\n",
    "               references=[['hello there'], ['general kenobi']])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c924a315",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "input_ids= ▁Unfortunately for▁them, a RATP control▁team▁showed▁up.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Din păcate, ei, a-a făcut apariția o echipă de control RA RATP.,,,, - Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din Din\n",
      "label= <pad> Din nefericire pentru ei, și-a făcut apariția o echipă de controlori RATP.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "2\n",
      "input_ids= You are a▁great▁power▁only▁if you▁have▁solutions.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Eşt tu o, doar daca ai solu soluțiitii.,,,,,-- - - - Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu Tu\n",
      "label= <pad> Dar esti mare putere doar daca ai și solutii.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "4\n",
      "input_ids= ▁Most social democratic▁branches▁proposed the date of▁October 11 for the▁extraordinary▁congress, and the exact date▁will be▁decided on▁Monday by the▁Executive▁Committee of the▁party.<pad><pad><pad>\n",
      "pred= Majoritatea majoritate a filialelor social-democrate au propus dataul datareş, 11 octombrie, iar data exactăa va acestuiaului va fi hotara luni de ca Executiv al Partidului.,iiiiiul Comitetul Comitetul Comitetul Comitetul În În\n",
      "label= <pad> Marea majoritate a filialelor social-democrate au propus Congres extraordinar PSD pe 11 octombrie, iar data exacta a Congresului va fi decisa luni de Comitetul Executiv al partidului.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "6\n",
      "input_ids= Beethoven, Brahms,▁Bartok, Enescu▁were▁working▁people,▁artists, and not▁commercial▁representatives.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Beethoven, Brahms, Bartok, Enescu erau oameni, lucraeau, art erau arti, nu reprezentanti comerciali. DE,,, Be Be Be Be Be\n",
      "label= <pad> Beethoven, Brahms, Bartok, Enescu erau oameni care munceau, care erau artisti și nu reprezentanti comerciali.<pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "8\n",
      "input_ids= ▁Before▁arriving▁at the Court of▁Appeal of Iasi, the van▁transporting the rapists from Vaslui▁was▁involved in an accident in Păcurari district as the car▁was▁entering Iasi▁city▁through the▁beltway.<pad><pad><pad><pad><pad><pad>\n",
      "pred= Înainte de sosirea ajunge la Curtea de Apel dinși, duba care viol au transportați violatorii din Vaslui a fost implicată într-un accident în în districtul Purari, când intrând în orașulși prin autostrad,, = = = = = = =\n",
      "label= <pad> Înainte de a ajunge la Curtea de Apel Iași, duba cu care erau transportați violatorii din Vaslui a fost implicată într-un accident produs în cartierul Păcurari, mașina intrând în Iași pe centură.<pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "10\n",
      "input_ids= ▁Here's the▁lesson.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Uite leccția.ți,: - - - Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată Iată\n",
      "label= <pad> Iată lecția.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "{'score': 5.010387094417482, 'counts': [1370, 838, 532, 332], 'totals': [4437, 4349, 4261, 4173], 'precisions': [30.87671850349335, 19.268797424695332, 12.48533208167097, 7.955907021327582], 'bp': 0.32134932605584, 'sys_len': 4437, 'ref_len': 9474}\n"
     ]
    }
   ],
   "source": [
    "# Evaluation: BLEU on a few test batches\n",
    "def test():\n",
    "    model.eval()\n",
    "\n",
    "    # Data loader over the test split\n",
    "    loader_test = torch.utils.data.DataLoader(\n",
    "        dataset=dataset['test'],\n",
    "        batch_size=8,\n",
    "        collate_fn=collate_fn,\n",
    "        shuffle=True,\n",
    "        drop_last=True,\n",
    "    )\n",
    "\n",
    "    predictions = []\n",
    "    references = []\n",
    "    for i, data in enumerate(loader_test):\n",
    "        # Forward pass without gradients\n",
    "        with torch.no_grad():\n",
    "            out = model(**data)\n",
    "\n",
    "        # Strip <pad>/</s> when decoding, otherwise the references are mostly\n",
    "        # <pad> runs and the predictions keep post-EOS garbage, which distorts BLEU\n",
    "        pred = tokenizer.batch_decode(out['logits'].argmax(dim=2),\n",
    "                                      skip_special_tokens=True)\n",
    "        label = tokenizer.batch_decode(data['decoder_input_ids'],\n",
    "                                       skip_special_tokens=True)\n",
    "        predictions.extend(pred)\n",
    "        references.extend(label)\n",
    "\n",
    "        if i % 2 == 0:\n",
    "            print(i)\n",
    "            input_ids = tokenizer.decode(data['input_ids'][0])\n",
    "\n",
    "            print('input_ids=', input_ids)\n",
    "            print('pred=', pred[0])\n",
    "            print('label=', label[0])\n",
    "\n",
    "        if i == 10:\n",
    "            break\n",
    "\n",
    "    # sacrebleu expects a list of reference translations per prediction\n",
    "    references = [[j] for j in references]\n",
    "    metric_out = metric.compute(predictions=predictions, references=references)\n",
    "    print(metric_out)\n",
    "\n",
    "\n",
    "test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "1375fcad",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/envs/cpu/lib/python3.6/site-packages/transformers/optimization.py:309: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
      "  FutureWarning,\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 1.9611096382141113 0.0 {'score': 6.2696211486540125, 'counts': [127, 82, 54, 32], 'totals': [363, 355, 347, 339], 'precisions': [34.98622589531681, 23.098591549295776, 15.561959654178674, 9.43952802359882], 'bp': 0.33776683830627435, 'sys_len': 363, 'ref_len': 757} 1.9992e-05\n",
      "50 1.214351773262024 0.0013736263736263737 {'score': 4.345771608240105, 'counts': [178, 122, 89, 65], 'totals': [663, 655, 647, 639], 'precisions': [26.84766214177979, 18.625954198473284, 13.75579598145286, 10.172143974960877], 'bp': 0.267199776786503, 'sys_len': 663, 'ref_len': 1538} 1.9592e-05\n",
      "100 0.7556186318397522 0.006818181818181818 {'score': 12.965422783170721, 'counts': [171, 130, 100, 77], 'totals': [360, 352, 344, 336], 'precisions': [47.5, 36.93181818181818, 29.069767441860463, 22.916666666666668], 'bp': 0.3943345747424834, 'sys_len': 360, 'ref_len': 695} 1.9192000000000002e-05\n",
      "150 0.7815563082695007 0.01201923076923077 {'score': 11.647085644686467, 'counts': [155, 117, 93, 74], 'totals': [345, 337, 329, 321], 'precisions': [44.927536231884055, 34.718100890207715, 28.267477203647417, 23.052959501557634], 'bp': 0.3668146678458856, 'sys_len': 345, 'ref_len': 691} 1.8792000000000002e-05\n",
      "200 0.6868870258331299 0.009375 {'score': 14.853154322281833, 'counts': [129, 97, 72, 51], 'totals': [257, 249, 241, 233], 'precisions': [50.19455252918288, 38.95582329317269, 29.87551867219917, 21.888412017167383], 'bp': 0.4417016509944529, 'sys_len': 257, 'ref_len': 467} 1.8392e-05\n",
      "250 1.064142107963562 0.0 {'score': 6.701338582841909, 'counts': [150, 97, 68, 48], 'totals': [426, 418, 410, 402], 'precisions': [35.2112676056338, 23.205741626794257, 16.585365853658537, 11.940298507462687], 'bp': 0.3341236545039393, 'sys_len': 426, 'ref_len': 893} 1.7992e-05\n",
      "300 0.8150560259819031 0.00909090909090909 {'score': 20.308234553259478, 'counts': [215, 162, 126, 93], 'totals': [381, 373, 365, 357], 'precisions': [56.43044619422572, 43.43163538873995, 34.52054794520548, 26.050420168067227], 'bp': 0.527071035393402, 'sys_len': 381, 'ref_len': 625} 1.7592000000000004e-05\n",
      "350 0.6723054647445679 0.00211864406779661 {'score': 9.252518807805819, 'counts': [172, 124, 91, 64], 'totals': [415, 407, 399, 391], 'precisions': [41.44578313253012, 30.466830466830466, 22.80701754385965, 16.36828644501279], 'bp': 0.3531141668772693, 'sys_len': 415, 'ref_len': 847} 1.7192e-05\n",
      "400 0.8973532319068909 0.00211864406779661 {'score': 6.743549628192731, 'counts': [147, 114, 86, 63], 'totals': [416, 408, 400, 392], 'precisions': [35.33653846153846, 27.941176470588236, 21.5, 16.071428571428573], 'bp': 0.27902823329013315, 'sys_len': 416, 'ref_len': 947} 1.6792e-05\n",
      "450 0.8339553475379944 0.007075471698113208 {'score': 9.216910440647712, 'counts': [155, 116, 84, 63], 'totals': [360, 352, 344, 336], 'precisions': [43.05555555555556, 32.95454545454545, 24.41860465116279, 18.75], 'bp': 0.32465246735834974, 'sys_len': 360, 'ref_len': 765} 1.6392e-05\n",
      "500 0.9022101759910583 0.0046875 {'score': 9.032123755791558, 'counts': [225, 169, 132, 103], 'totals': [561, 553, 545, 537], 'precisions': [40.106951871657756, 30.560578661844485, 24.220183486238533, 19.180633147113593], 'bp': 0.3288020223101731, 'sys_len': 561, 'ref_len': 1185} 1.5992000000000002e-05\n",
      "550 0.8874062895774841 0.0055147058823529415 {'score': 4.77901350843722, 'counts': [139, 100, 70, 47], 'totals': [508, 500, 492, 484], 'precisions': [27.362204724409448, 20.0, 14.227642276422765, 9.710743801652892], 'bp': 0.28820176010840903, 'sys_len': 508, 'ref_len': 1140} 1.5592e-05\n",
      "600 0.96967613697052 0.0 {'score': 10.273605765182102, 'counts': [122, 83, 60, 41], 'totals': [280, 272, 264, 256], 'precisions': [43.57142857142857, 30.514705882352942, 22.727272727272727, 16.015625], 'bp': 0.3895133487108618, 'sys_len': 280, 'ref_len': 544} 1.5192000000000003e-05\n",
      "650 0.9132704734802246 0.0038461538461538464 {'score': 5.886629775498404, 'counts': [153, 105, 74, 54], 'totals': [464, 456, 448, 440], 'precisions': [32.974137931034484, 23.026315789473685, 16.517857142857142, 12.272727272727273], 'bp': 0.29719640306158684, 'sys_len': 464, 'ref_len': 1027} 1.4792000000000002e-05\n",
      "700 0.5745275020599365 0.00646551724137931 {'score': 8.567677943813583, 'counts': [149, 117, 89, 68], 'totals': [400, 392, 384, 376], 'precisions': [37.25, 29.846938775510203, 23.177083333333332, 18.085106382978722], 'bp': 0.3279152788995885, 'sys_len': 400, 'ref_len': 846} 1.4392000000000002e-05\n",
      "750 0.8405245542526245 0.0031645569620253164 {'score': 5.856952905090346, 'counts': [181, 139, 111, 86], 'totals': [496, 488, 480, 472], 'precisions': [36.49193548387097, 28.483606557377048, 23.125, 18.220338983050848], 'bp': 0.22767440710111092, 'sys_len': 496, 'ref_len': 1230} 1.3992000000000001e-05\n",
      "800 0.7973319888114929 0.006818181818181818 {'score': 3.0314588265131883, 'counts': [90, 61, 40, 25], 'totals': [365, 357, 349, 341], 'precisions': [24.65753424657534, 17.086834733893557, 11.461318051575931, 7.331378299120234], 'bp': 0.22100078153881905, 'sys_len': 365, 'ref_len': 916} 1.3592000000000001e-05\n",
      "850 1.1973825693130493 0.0016666666666666668 {'score': 6.1924458732831225, 'counts': [177, 120, 84, 59], 'totals': [519, 511, 503, 495], 'precisions': [34.104046242774565, 23.483365949119374, 16.69980119284294, 11.919191919191919], 'bp': 0.30990681534546255, 'sys_len': 519, 'ref_len': 1127} 1.3192e-05\n",
      "900 0.63591068983078 0.004310344827586207 {'score': 6.120664646698392, 'counts': [132, 105, 84, 69], 'totals': [405, 397, 389, 381], 'precisions': [32.592592592592595, 26.448362720403022, 21.59383033419023, 18.11023622047244], 'bp': 0.25401286329038647, 'sys_len': 405, 'ref_len': 960} 1.2792e-05\n",
      "950 0.8044742941856384 0.00390625 {'score': 5.059251102982869, 'counts': [275, 195, 137, 98], 'totals': [929, 921, 913, 905], 'precisions': [29.60172228202368, 21.172638436482085, 15.005476451259584, 10.828729281767956], 'bp': 0.2832085909501826, 'sys_len': 929, 'ref_len': 2101} 1.2392000000000003e-05\n",
      "1000 0.5571568608283997 0.00909090909090909 {'score': 16.477790591634765, 'counts': [188, 158, 131, 109], 'totals': [363, 355, 347, 339], 'precisions': [51.790633608815426, 44.50704225352113, 37.752161383285305, 32.15339233038348], 'bp': 0.40289032152913296, 'sys_len': 363, 'ref_len': 693} 1.1992000000000001e-05\n",
      "1050 1.0285018682479858 0.0023148148148148147 {'score': 9.444810125815195, 'counts': [151, 105, 77, 56], 'totals': [373, 365, 357, 349], 'precisions': [40.48257372654155, 28.767123287671232, 21.568627450980394, 16.045845272206304], 'bp': 0.37484853398897106, 'sys_len': 373, 'ref_len': 739} 1.1592000000000002e-05\n",
      "1100 0.8140971064567566 0.006696428571428571 {'score': 6.497792239022409, 'counts': [128, 92, 67, 52], 'totals': [379, 371, 363, 355], 'precisions': [33.773087071240106, 24.797843665768195, 18.457300275482094, 14.647887323943662], 'bp': 0.29787486576801736, 'sys_len': 379, 'ref_len': 838} 1.1192e-05\n",
      "1150 1.2302709817886353 0.01 {'score': 10.598643864535322, 'counts': [147, 104, 74, 53], 'totals': [339, 331, 323, 315], 'precisions': [43.36283185840708, 31.419939577039276, 22.910216718266255, 16.825396825396826], 'bp': 0.3937049955736953, 'sys_len': 339, 'ref_len': 655} 1.0792000000000001e-05\n",
      "1200 0.7739734053611755 0.00625 {'score': 12.167317897795243, 'counts': [128, 93, 66, 46], 'totals': [272, 264, 256, 248], 'precisions': [47.05882352941177, 35.22727272727273, 25.78125, 18.548387096774192], 'bp': 0.40776721310207975, 'sys_len': 272, 'ref_len': 516} 1.0392e-05\n",
      "1250 0.7288955450057983 0.00909090909090909 {'score': 22.941014783393737, 'counts': [232, 172, 128, 100], 'totals': [383, 375, 367, 359], 'precisions': [60.574412532637076, 45.86666666666667, 34.87738419618529, 27.855153203342617], 'bp': 0.5659833599899339, 'sys_len': 383, 'ref_len': 601} 9.992e-06\n",
      "1300 0.8120128512382507 0.00423728813559322 {'score': 6.605692366402147, 'counts': [143, 103, 74, 54], 'totals': [392, 384, 376, 368], 'precisions': [36.47959183673469, 26.822916666666668, 19.680851063829788, 14.673913043478262], 'bp': 0.2865047968601901, 'sys_len': 392, 'ref_len': 882} 9.592e-06\n",
      "1350 0.7176678776741028 0.01059322033898305 {'score': 10.394600821240477, 'counts': [179, 132, 97, 71], 'totals': [403, 395, 387, 379], 'precisions': [44.416873449131515, 33.41772151898734, 25.064599483204134, 18.733509234828496], 'bp': 0.359754830399703, 'sys_len': 403, 'ref_len': 815} 9.192000000000001e-06\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1400 0.9339891672134399 0.00436046511627907 {'score': 6.273109997311125, 'counts': [197, 139, 106, 81], 'totals': [607, 599, 591, 583], 'precisions': [32.45469522240527, 23.20534223706177, 17.93570219966159, 13.893653516295025], 'bp': 0.30139275785881325, 'sys_len': 607, 'ref_len': 1335} 8.792e-06\n",
      "1450 0.7789385318756104 0.0 {'score': 10.002563834271076, 'counts': [141, 105, 78, 56], 'totals': [340, 332, 324, 316], 'precisions': [41.470588235294116, 31.626506024096386, 24.074074074074073, 17.72151898734177], 'bp': 0.36572179669341776, 'sys_len': 340, 'ref_len': 682} 8.392e-06\n",
      "1500 0.9499245882034302 0.003968253968253968 {'score': 8.773482938347657, 'counts': [173, 125, 92, 69], 'totals': [443, 435, 427, 419], 'precisions': [39.05191873589165, 28.735632183908045, 21.54566744730679, 16.46778042959427], 'bp': 0.34926695473651137, 'sys_len': 443, 'ref_len': 909} 7.992e-06\n",
      "1550 0.8153000473976135 0.0021929824561403508 {'score': 9.020920286090554, 'counts': [164, 117, 84, 59], 'totals': [403, 395, 387, 379], 'precisions': [40.69478908188586, 29.620253164556964, 21.705426356589147, 15.567282321899736], 'bp': 0.3570866980741236, 'sys_len': 403, 'ref_len': 818} 7.592e-06\n",
      "1600 0.8967578411102295 0.009615384615384616 {'score': 11.507622648637492, 'counts': [157, 117, 90, 73], 'totals': [366, 358, 350, 342], 'precisions': [42.89617486338798, 32.68156424581006, 25.714285714285715, 21.34502923976608], 'bp': 0.38854153983162587, 'sys_len': 366, 'ref_len': 712} 7.192e-06\n",
      "1650 0.9030292630195618 0.0 {'score': 7.391437469706476, 'counts': [164, 122, 92, 65], 'totals': [459, 451, 443, 435], 'precisions': [35.729847494553375, 27.0509977827051, 20.767494356659142, 14.942528735632184], 'bp': 0.3158444458311125, 'sys_len': 459, 'ref_len': 988} 6.792000000000001e-06\n",
      "1700 1.0304319858551025 0.004629629629629629 {'score': 6.944188717062315, 'counts': [129, 89, 67, 49], 'totals': [363, 355, 347, 339], 'precisions': [35.53719008264463, 25.070422535211268, 19.30835734870317, 14.454277286135694], 'bp': 0.31097458912490183, 'sys_len': 363, 'ref_len': 787} 6.392000000000001e-06\n",
      "1750 0.8213488459587097 0.008333333333333333 {'score': 11.70482375481758, 'counts': [182, 134, 100, 73], 'totals': [405, 397, 389, 381], 'precisions': [44.93827160493827, 33.75314861460957, 25.70694087403599, 19.16010498687664], 'bp': 0.3981256363375196, 'sys_len': 405, 'ref_len': 778} 5.992e-06\n",
      "1800 0.8443653583526611 0.0018115942028985507 {'score': 6.972043907080832, 'counts': [163, 126, 100, 79], 'totals': [486, 478, 470, 462], 'precisions': [33.53909465020576, 26.359832635983263, 21.27659574468085, 17.0995670995671], 'bp': 0.2927620658742936, 'sys_len': 486, 'ref_len': 1083} 5.592000000000001e-06\n",
      "1850 1.024266004562378 0.005319148936170213 {'score': 13.07505508489155, 'counts': [151, 102, 73, 51], 'totals': [315, 307, 299, 291], 'precisions': [47.93650793650794, 33.22475570032573, 24.414715719063544, 17.52577319587629], 'bp': 0.45507110964807235, 'sys_len': 315, 'ref_len': 563} 5.1920000000000004e-06\n",
      "1900 0.8580136299133301 0.010775862068965518 {'score': 8.046040437533932, 'counts': [153, 109, 81, 59], 'totals': [399, 391, 383, 375], 'precisions': [38.34586466165413, 27.877237851662404, 21.148825065274153, 15.733333333333333], 'bp': 0.32946811774409757, 'sys_len': 399, 'ref_len': 842} 4.792000000000001e-06\n",
      "1950 0.8060821294784546 0.006944444444444444 {'score': 6.5102886276463225, 'counts': [131, 92, 67, 46], 'totals': [380, 372, 364, 356], 'precisions': [34.473684210526315, 24.731182795698924, 18.406593406593405, 12.92134831460674], 'bp': 0.30679394447836644, 'sys_len': 380, 'ref_len': 829} 4.3920000000000005e-06\n",
      "2000 0.7858167886734009 0.005952380952380952 {'score': 10.689836573699877, 'counts': [120, 94, 74, 57], 'totals': [274, 266, 258, 250], 'precisions': [43.7956204379562, 35.338345864661655, 28.68217054263566, 22.8], 'bp': 0.33702733442155364, 'sys_len': 274, 'ref_len': 572} 3.992e-06\n",
      "2050 0.8631567358970642 0.007142857142857143 {'score': 6.77129997748059, 'counts': [184, 127, 92, 65], 'totals': [524, 516, 508, 500], 'precisions': [35.11450381679389, 24.612403100775193, 18.11023622047244, 13.0], 'bp': 0.3188196262744, 'sys_len': 524, 'ref_len': 1123} 3.5920000000000005e-06\n",
      "2100 0.7178978323936462 0.0019230769230769232 {'score': 6.9279743732515575, 'counts': [162, 123, 95, 74], 'totals': [469, 461, 453, 445], 'precisions': [34.54157782515991, 26.68112798264642, 20.97130242825607, 16.629213483146067], 'bp': 0.29096811803639777, 'sys_len': 469, 'ref_len': 1048} 3.192e-06\n",
      "2150 0.9758418798446655 0.002551020408163265 {'score': 11.982637019927257, 'counts': [158, 111, 77, 48], 'totals': [339, 331, 323, 315], 'precisions': [46.607669616519175, 33.53474320241692, 23.8390092879257, 15.238095238095237], 'bp': 0.4365254701384091, 'sys_len': 339, 'ref_len': 620} 2.792e-06\n",
      "2200 0.8519835472106934 0.010416666666666666 {'score': 9.990321528635338, 'counts': [169, 126, 96, 75], 'totals': [415, 407, 399, 391], 'precisions': [40.72289156626506, 30.95823095823096, 24.06015037593985, 19.18158567774936], 'bp': 0.3617262857463728, 'sys_len': 415, 'ref_len': 837} 2.392e-06\n",
      "2250 1.0556378364562988 0.0035714285714285713 {'score': 9.733125539916944, 'counts': [199, 146, 111, 84], 'totals': [473, 465, 457, 449], 'precisions': [42.07188160676533, 31.397849462365592, 24.288840262582056, 18.70824053452116], 'bp': 0.34967890575171634, 'sys_len': 473, 'ref_len': 970} 1.992e-06\n",
      "2300 0.7630690336227417 0.006818181818181818 {'score': 16.61859179553974, 'counts': [199, 143, 103, 69], 'totals': [433, 425, 417, 409], 'precisions': [45.95842956120092, 33.64705882352941, 24.700239808153476, 16.87041564792176], 'bp': 0.5865559037193535, 'sys_len': 433, 'ref_len': 664} 1.5920000000000002e-06\n",
      "2350 0.7255266308784485 0.005555555555555556 {'score': 14.147476871181382, 'counts': [153, 113, 84, 63], 'totals': [310, 302, 294, 286], 'precisions': [49.354838709677416, 37.41721854304636, 28.571428571428573, 22.027972027972027], 'bp': 0.43087576276754513, 'sys_len': 310, 'ref_len': 571} 1.1920000000000002e-06\n",
      "2400 0.9680993556976318 0.00646551724137931 {'score': 13.537030700604614, 'counts': [194, 134, 88, 61], 'totals': [386, 378, 370, 362], 'precisions': [50.259067357512954, 35.44973544973545, 23.783783783783782, 16.85082872928177], 'bp': 0.4656836655472298, 'sys_len': 386, 'ref_len': 681} 7.920000000000001e-07\n",
      "2450 0.7554168105125427 0.0030864197530864196 {'score': 4.602943840098253, 'counts': [157, 119, 92, 71], 'totals': [591, 583, 575, 567], 'precisions': [26.56514382402707, 20.41166380789022, 16.0, 12.522045855379188], 'bp': 0.25353575437126313, 'sys_len': 591, 'ref_len': 1402} 3.92e-07\n"
     ]
    }
   ],
   "source": [
    "from transformers import AdamW\n",
    "from transformers.optimization import get_scheduler\n",
    "\n",
    "\n",
    "#训练\n",
    "def train():\n",
    "    optimizer = AdamW(model.parameters(), lr=2e-5)\n",
    "    scheduler = get_scheduler(name='linear',\n",
    "                              num_warmup_steps=0,\n",
    "                              num_training_steps=len(loader),\n",
    "                              optimizer=optimizer)\n",
    "\n",
    "    model.train()\n",
    "    for i, data in enumerate(loader):\n",
    "        out = model(**data)\n",
    "        loss = out['loss']\n",
    "\n",
    "        loss.backward()\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "\n",
    "        optimizer.step()\n",
    "        scheduler.step()\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        model.zero_grad()\n",
    "\n",
    "        if i % 50 == 0:\n",
    "            out = out['logits'].argmax(dim=2)\n",
    "            correct = (data['decoder_input_ids'] == out).sum().item()\n",
    "            total = data['decoder_input_ids'].shape[1] * 8\n",
    "            accuracy = correct / total\n",
    "            del correct\n",
    "            del total\n",
    "\n",
    "            predictions = []\n",
    "            references = []\n",
    "            for j in range(8):\n",
    "                pred = tokenizer.decode(out[j])\n",
    "                label = tokenizer.decode(data['decoder_input_ids'][j])\n",
    "                predictions.append(pred)\n",
    "                references.append([label])\n",
    "\n",
    "            metric_out = metric.compute(predictions=predictions,\n",
    "                                        references=references)\n",
    "\n",
    "            lr = optimizer.state_dict()['param_groups'][0]['lr']\n",
    "\n",
    "            print(i, loss.item(), accuracy, metric_out, lr)\n",
    "\n",
    "    torch.save(model, 'models/7.翻译.model')\n",
    "\n",
    "\n",
    "train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "ed397281",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "input_ids= ▁Two▁brothers from Tomești▁swallowed▁sleeping▁pills.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Doi fraţiăţiți din din Tomești au înghiţitghițit somnifere.,u:: - Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi Doi\n",
      "label= <pad> Doi frățiori din Tomești au înghițit somnifere.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "2\n",
      "input_ids= ▁Whatever that▁does,▁it▁won't be▁pleasant.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Orice ar face asta, nu va fi plăcut., nu nu Nu Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric Oric\n",
      "label= <pad> Orice ar însemna asta, nu va fi plăcut.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "4\n",
      "input_ids= \"During the▁investigations, the▁two▁young men▁said▁they▁wanted to▁reach France,▁which is▁why▁they▁made contact with a▁countryman,▁through▁which▁they met with the▁Italian▁driver. The▁latter▁has▁offered to▁carry▁them to▁their▁destination,▁where he▁would▁receive 2,000 euros per▁person\",▁according to the▁Border▁Police▁press▁release.\n",
      "pred= \"În investigari, cei au spus ca vortioneaza sa ajunga in Franta, motiv pentru care au luat legatura cu un comnational, prin care caruia s-au intalnit cu soferul italian. acesta s-a oferit sa-i duce la la destinatie, unde va sa primeasca 2.000 euro euro per persoana\", potrivit spune in comunicatul detiei de Frontiera.\n",
      "label= <pad> \"La cercetari, tinerii au declarat ca intentionau sa ajunga în Franta, fapt pentru care au luat legatura cu un conational, prin intermediul caruia s-au intalnit cu soferul italian, care s-a oferit sa-i transporte pana la destinatie, unde urma sa primeasca 2.000 de euro de persoana\", se arata în comunicatul Politiei de Frontiera.\n",
      "6\n",
      "input_ids= You are a▁great▁power▁only▁if you▁have▁solutions.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Sunte tu o, doar daca ai solu soluțiitii., este este- - - - - - Sunte Sunte Sunte Sunte Sunte Sunte Sunte Sunte Sunte Sunte Sunte\n",
      "label= <pad> Dar esti mare putere doar daca ai și solutii.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "8\n",
      "input_ids= In the▁eyes of the radical▁left▁wing, Brussels is an agent of▁international capitalism and a▁promoter of▁globalisation▁which▁imposed▁austerity to the▁poor.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= În ochii aii radicale,-ul este un capitalismului interna și un promotor al globalizării care a impus austeritate celor celorlor.. În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În În\n",
      "label= <pad> În ochii stângii radicale Bruxelles-ul este agentul capitalismului internațional și un promotor al globalizării care a impus austeritatea săracilor.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "10\n",
      "input_ids= ▁Hungary is▁during a concerted action to▁close▁its▁borders in▁order to stop▁immigrants from▁entering the▁country.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "pred= Ungaria se în-o actiune concertata de i-si inchide frontiereletele pentru a-pana imigran imigrantilor iniiii pentru Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria Ungaria\n",
      "label= <pad> Ungaria este intr-o actiune concertata de a-și inchide granitele pentru a stavili accesul imigrantilor.<pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>\n",
      "{'score': 4.89127681288785, 'counts': [1474, 915, 588, 374], 'totals': [4799, 4711, 4623, 4535], 'precisions': [30.714732235882476, 19.422627892167267, 12.7190136275146, 8.24696802646086], 'bp': 0.3092584856256912, 'sys_len': 4799, 'ref_len': 10431}\n"
     ]
    }
   ],
   "source": [
    "model = torch.load('models/7.翻译.model')\n",
    "test()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
