{
 "metadata": {
  "kernelspec": {
   "language": "python",
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.7.12",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  }
 },
 "nbformat_minor": 4,
 "nbformat": 4,
 "cells": [
  {
   "cell_type": "code",
   "source": [
    "trigger_label = '触发词'\n",
    "schemas = ['O', 'B-地块编码','I-地块编码', 'B-地块位置', 'I-地块位置', 'B-出让面积', 'I-出让面积', 'B-土地用途', 'I-土地用途', 'B-容积率', 'I-容积率', \n",
    "           'B-起始价', 'I-起始价', 'B-成交价', 'I-成交价', 'B-溢价率', 'I-溢价率', 'B-成交时间', 'I-成交时间', 'B-受让人', 'I-受让人', \n",
    "           'B-城市', 'I-城市', 'B-触发词', 'I-触发词']\n",
    "id2tag = {idx: tag for idx, tag in enumerate(schemas)}\n",
    "tag2id = {tag: idx for idx, tag in enumerate(schemas)}\n",
    "\n",
    "id2tag,tag2id"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:48:48.462148Z",
     "iopub.execute_input": "2022-08-13T11:48:48.462538Z",
     "iopub.status.idle": "2022-08-13T11:48:48.482341Z",
     "shell.execute_reply.started": "2022-08-13T11:48:48.462505Z",
     "shell.execute_reply": "2022-08-13T11:48:48.480536Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 41,
   "outputs": [
    {
     "data": {
      "text/plain": "({0: 'O',\n  1: 'B-地块编码',\n  2: 'I-地块编码',\n  3: 'B-地块位置',\n  4: 'I-地块位置',\n  5: 'B-出让面积',\n  6: 'I-出让面积',\n  7: 'B-土地用途',\n  8: 'I-土地用途',\n  9: 'B-容积率',\n  10: 'I-容积率',\n  11: 'B-起始价',\n  12: 'I-起始价',\n  13: 'B-成交价',\n  14: 'I-成交价',\n  15: 'B-溢价率',\n  16: 'I-溢价率',\n  17: 'B-成交时间',\n  18: 'I-成交时间',\n  19: 'B-受让人',\n  20: 'I-受让人',\n  21: 'B-城市',\n  22: 'I-城市',\n  23: 'B-触发词',\n  24: 'I-触发词'},\n {'O': 0,\n  'B-地块编码': 1,\n  'I-地块编码': 2,\n  'B-地块位置': 3,\n  'I-地块位置': 4,\n  'B-出让面积': 5,\n  'I-出让面积': 6,\n  'B-土地用途': 7,\n  'I-土地用途': 8,\n  'B-容积率': 9,\n  'I-容积率': 10,\n  'B-起始价': 11,\n  'I-起始价': 12,\n  'B-成交价': 13,\n  'I-成交价': 14,\n  'B-溢价率': 15,\n  'I-溢价率': 16,\n  'B-成交时间': 17,\n  'I-成交时间': 18,\n  'B-受让人': 19,\n  'I-受让人': 20,\n  'B-城市': 21,\n  'I-城市': 22,\n  'B-触发词': 23,\n  'I-触发词': 24})"
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "# 读取原始训练据并进行标准化处理\n",
    "import json\n",
    "import numpy, pandas as pd\n",
    "from datasets import Dataset\n",
    "\n",
    "def processLabelInALine(line):\n",
    "    # 初始化空数组\n",
    "    line = line.strip()\n",
    "    if len(line) == 0:\n",
    "        return\n",
    "    linejson = json.loads(line)\n",
    "    nertag = numpy.zeros(len(linejson[\"data\"]))\n",
    "    tokens = [i for i in linejson[\"data\"]]\n",
    "    for (start, stop, tagstr) in linejson[\"label\"]:\n",
    "        if tagstr == \"NN\":\n",
    "            nertag[start:stop] = 0\n",
    "        else:\n",
    "            starttagstr = \"B-\" + tagstr\n",
    "            intenelstr = \"I-\" + tagstr\n",
    "            nertag[start] = tag2id[starttagstr]\n",
    "            nertag[start + 1:stop] = tag2id[intenelstr]\n",
    "    return {\"id\": linejson[\"id\"], \"tokens\": tokens, \"ner_tags\": nertag}\n",
    "\n",
    "def processFile(filename):\n",
    "    f=open(filename,'r',encoding='utf8')\n",
    "    lines = f.readlines()\n",
    "    processed = list(map(processLabelInALine, lines))\n",
    "    processNone = list(filter(None, processed))\n",
    "    df = pd.DataFrame(processNone)\n",
    "    landDataset =  Dataset.from_pandas(df, split=\"train\")\n",
    "    return landDataset"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:48:48.486608Z",
     "iopub.execute_input": "2022-08-13T11:48:48.487560Z",
     "iopub.status.idle": "2022-08-13T11:48:49.466739Z",
     "shell.execute_reply.started": "2022-08-13T11:48:48.487516Z",
     "shell.execute_reply": "2022-08-13T11:48:49.465652Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 42,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "#预处理数据，label转化为规范格式。\n",
    "from datasets import DatasetDict\n",
    "land_train,land_valid = processFile(\"train.json\"), processFile(\"valid.json\")\n",
    "dataset = DatasetDict()\n",
    "dataset['train'] = land_train\n",
    "dataset['valid'] = land_valid\n",
    "dataset"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:48:49.468818Z",
     "iopub.execute_input": "2022-08-13T11:48:49.470239Z",
     "iopub.status.idle": "2022-08-13T11:48:49.703016Z",
     "shell.execute_reply.started": "2022-08-13T11:48:49.470183Z",
     "shell.execute_reply": "2022-08-13T11:48:49.701682Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 43,
   "outputs": [
    {
     "data": {
      "text/plain": "DatasetDict({\n    train: Dataset({\n        features: ['id', 'tokens', 'ner_tags'],\n        num_rows: 1277\n    })\n    valid: Dataset({\n        features: ['id', 'tokens', 'ner_tags'],\n        num_rows: 426\n    })\n})"
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "# hide_output\n",
    "from transformers import AutoTokenizer\n",
    "#加载\n",
    "#bert_model_name = \"bert-base-chinese\"\n",
    "xlmr_model_name = \"xlm-roberta-base\"\n",
    "#bert_tokenizer = AutoTokenizer.from_pretrained(bert_model_name)\n",
    "xlmr_tokenizer = AutoTokenizer.from_pretrained(xlmr_model_name)\n",
    "def tokenize_and_align_labels(examples):\n",
    "    #输入字的list，输出用tokenizer的词典中序号表示的字，并对特殊字符和子词进行特殊处理。\n",
    "    tokenized_inputs = xlmr_tokenizer(examples[\"tokens\"], truncation=True, \n",
    "                                      is_split_into_words=True)\n",
    "    #定义空的标签表示\n",
    "    labels = []\n",
    "    #迭代输入的ner_tags标签\n",
    "    for idx, label in enumerate(examples[\"ner_tags\"]):\n",
    "        #tokenized_input包含word_ids函数，实现对子词与整词的识别。\n",
    "        #这里我们可以看到word_ids将每个子单词映射到单词序列中对应的索引\n",
    "        word_ids = tokenized_inputs.word_ids(batch_index=idx)\n",
    "        previous_word_idx = None\n",
    "        label_ids = []\n",
    "        for word_idx in word_ids:\n",
    "            # 将特殊符号的标签设置为-100，以便在计算损失函数时自动忽略\n",
    "            if word_idx is None:\n",
    "                label_ids.append(-100)\n",
    "            # 把标签设置到每个词的第一个token上\n",
    "            elif word_idx != previous_word_idx:\n",
    "                label_ids.append(label[word_idx])\n",
    "            # 对于每个词的其他token也设置为当前标签\n",
    "            else:\n",
    "                label_ids.append(label[word_idx])\n",
    "            previous_word_idx = word_idx\n",
    "            \n",
    "        labels.append(label_ids)\n",
    "    #把处理后的labels数组，设置为tokenized_inputs[\"labels\"]\n",
    "    tokenized_inputs[\"labels\"] = labels\n",
    "    return tokenized_inputs"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:48:49.705829Z",
     "iopub.execute_input": "2022-08-13T11:48:49.706861Z",
     "iopub.status.idle": "2022-08-13T11:49:08.022787Z",
     "shell.execute_reply.started": "2022-08-13T11:48:49.706807Z",
     "shell.execute_reply": "2022-08-13T11:49:08.021184Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 44,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "from transformers import AutoConfig\n",
    "\n",
    "xlmr_config = AutoConfig.from_pretrained(xlmr_model_name, \n",
    "                                         num_labels=len(schemas),\n",
    "                                         id2label=id2tag,\n",
    "                                         label2id=tag2id)"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T12:01:45.448753Z",
     "iopub.execute_input": "2022-08-13T12:01:45.449254Z",
     "iopub.status.idle": "2022-08-13T12:01:45.493598Z",
     "shell.execute_reply.started": "2022-08-13T12:01:45.449215Z",
     "shell.execute_reply": "2022-08-13T12:01:45.491877Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 45,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "tokenized_datasets = dataset.map(tokenize_and_align_labels, batched=True, load_from_cache_file=False)\n",
    "pd.DataFrame([tokenized_datasets[\"train\"][\"tokens\"][0],tokenized_datasets[\"train\"][\"ner_tags\"][0]])"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T12:01:54.483897Z",
     "iopub.execute_input": "2022-08-13T12:01:54.484323Z",
     "iopub.status.idle": "2022-08-13T12:01:56.171570Z",
     "shell.execute_reply.started": "2022-08-13T12:01:54.484288Z",
     "shell.execute_reply": "2022-08-13T12:01:56.170675Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 46,
   "outputs": [
    {
     "data": {
      "text/plain": "  0%|          | 0/2 [00:00<?, ?ba/s]",
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "ac98af11562b485cb03d0674d71e4bc6"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "  0%|          | 0/1 [00:00<?, ?ba/s]",
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "4f1e478d68d44452a8b7803778622689"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "    0    1    2    3    4    5    6    7    8    9   ...    74    75    76  \\\n0    临    政    储    出    [    2    0    2    1    ]  ...     8     5     %   \n1  1.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0  2.0  ...  16.0  16.0  16.0   \n\n    77   78   79   80   81   82   83  \n0    ，    自    持    2    8    %    。  \n1  0.0  0.0  0.0  0.0  0.0  0.0  0.0  \n\n[2 rows x 84 columns]",
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>0</th>\n      <th>1</th>\n      <th>2</th>\n      <th>3</th>\n      <th>4</th>\n      <th>5</th>\n      <th>6</th>\n      <th>7</th>\n      <th>8</th>\n      <th>9</th>\n      <th>...</th>\n      <th>74</th>\n      <th>75</th>\n      <th>76</th>\n      <th>77</th>\n      <th>78</th>\n      <th>79</th>\n      <th>80</th>\n      <th>81</th>\n      <th>82</th>\n      <th>83</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>0</th>\n      <td>临</td>\n      <td>政</td>\n      <td>储</td>\n      <td>出</td>\n      <td>[</td>\n      <td>2</td>\n      <td>0</td>\n      <td>2</td>\n      <td>1</td>\n      <td>]</td>\n      <td>...</td>\n      <td>8</td>\n      <td>5</td>\n      <td>%</td>\n      <td>，</td>\n      <td>自</td>\n      <td>持</td>\n      <td>2</td>\n      <td>8</td>\n      <td>%</td>\n      <td>。</td>\n    </tr>\n    <tr>\n      <th>1</th>\n      <td>1.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>2.0</td>\n      <td>...</td>\n      <td>16.0</td>\n      <td>16.0</td>\n      <td>16.0</td>\n      <td>0.0</td>\n      <td>0.0</td>\n      <td>0.0</td>\n      <td>0.0</td>\n      <td>0.0</td>\n      <td>0.0</td>\n      <td>0.0</td>\n    </tr>\n  </tbody>\n</table>\n<p>2 rows × 84 columns</p>\n</div>"
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
     "from seqeval.metrics import classification_report\n",
     "\n",
     "# Sanity-check seqeval's entity-level scoring on a toy example: the shifted\n",
     "# MISC span counts as a miss, the exact PER span as a hit.\n",
     "y_true = [[\"O\", \"O\", \"O\", \"B-MISC\", \"I-MISC\", \"I-MISC\", \"O\"],\n",
     "          [\"B-PER\", \"I-PER\", \"O\"]]\n",
     "y_pred = [[\"O\", \"O\", \"B-MISC\", \"I-MISC\", \"I-MISC\", \"I-MISC\", \"O\"],\n",
     "          [\"B-PER\", \"I-PER\", \"O\"]]\n",
     "print(classification_report(y_true, y_pred))"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:09.368774Z",
     "iopub.execute_input": "2022-08-13T11:49:09.369275Z",
     "iopub.status.idle": "2022-08-13T11:49:09.469965Z",
     "shell.execute_reply.started": "2022-08-13T11:49:09.369227Z",
     "shell.execute_reply": "2022-08-13T11:49:09.468695Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 47,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "              precision    recall  f1-score   support\n",
      "\n",
      "        MISC       0.00      0.00      0.00         1\n",
      "         PER       1.00      1.00      1.00         1\n",
      "\n",
      "   micro avg       0.50      0.50      0.50         2\n",
      "   macro avg       0.50      0.50      0.50         2\n",
      "weighted avg       0.50      0.50      0.50         2\n",
      "\n"
     ]
    }
   ]
  },
  {
   "cell_type": "code",
   "source": [
    "from seqeval.metrics import classification_report\n",
    "\n",
    "#seqeval期望将预测和标签作为列表的列表，每个列表对应于我们的验证或测试集中的单个示例。\n",
    "#获取模型的输出，并将它们转换为seqeval所期望的列表。下面的技巧是通过确保我们忽略与后续子词相关联的标签id\n",
    "#Prediction\n",
    "import numpy as np\n",
    "from seqeval.metrics import f1_score\n",
    "from seqeval.metrics import classification_report\n",
    "\n",
    "def align_predictions(predictions, label_ids):\n",
    "    preds = np.argmax(predictions, axis=2)\n",
    "    batch_size, seq_len = preds.shape\n",
    "    labels_list, preds_list = [], []\n",
    "\n",
    "    for batch_idx in range(batch_size):\n",
    "        example_labels, example_preds = [], []\n",
    "        for seq_idx in range(seq_len):\n",
    "            # Ignore label IDs = -100\n",
    "            if label_ids[batch_idx, seq_idx] != -100:\n",
    "                example_labels.append(id2tag[label_ids[batch_idx][seq_idx]])\n",
    "                example_preds.append(tag2id[preds[batch_idx][seq_idx]])\n",
    "\n",
    "        labels_list.append(example_labels)\n",
    "        preds_list.append(example_preds)\n",
    "\n",
    "    return preds_list, labels_list\n",
    "\n",
    "def compute_metrics(eval_pred):\n",
    "    y_pred, y_true = align_predictions(eval_pred.predictions,\n",
    "                                       eval_pred.label_ids)\n",
    "    return {\"f1\": f1_score(y_true, y_pred)}"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:09.472086Z",
     "iopub.execute_input": "2022-08-13T11:49:09.472792Z",
     "iopub.status.idle": "2022-08-13T11:49:09.482121Z",
     "shell.execute_reply.started": "2022-08-13T11:49:09.472751Z",
     "shell.execute_reply": "2022-08-13T11:49:09.481035Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 48,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 定义模型"
   ],
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "source": [
     "import torch.nn as nn\n",
     "from transformers import XLMRobertaConfig\n",
     "from transformers.modeling_outputs import TokenClassifierOutput\n",
     "from transformers.models.roberta.modeling_roberta import RobertaModel\n",
     "from transformers.models.roberta.modeling_roberta import RobertaPreTrainedModel\n",
     "\n",
     "class XLMRobertaForTokenClassification(RobertaPreTrainedModel):\n",
     "    \"\"\"XLM-R encoder with a hand-built token-classification head.\n",
     "\n",
     "    Head = dropout over the encoder's last hidden states followed by a linear\n",
     "    projection to config.num_labels logits per token.\n",
     "    \"\"\"\n",
     "    config_class = XLMRobertaConfig\n",
     "\n",
     "    def __init__(self, config):\n",
     "        \"\"\"Build encoder body and classification head from `config`.\"\"\"\n",
     "        super().__init__(config)\n",
     "        self.num_labels = config.num_labels\n",
     "        # Load model body (no pooling layer: we need per-token states, not CLS)\n",
     "        self.roberta = RobertaModel(config, add_pooling_layer=False)\n",
     "        # Set up token classification head\n",
     "        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n",
     "        self.classifier = nn.Linear(config.hidden_size, config.num_labels)\n",
     "        # Load and initialize weights (must run after all submodules exist)\n",
     "        self.init_weights()\n",
     "\n",
     "    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, \n",
     "                labels=None, **kwargs):\n",
     "        \"\"\"Return TokenClassifierOutput; loss is computed only when `labels`\n",
     "        is given (label -100 is CrossEntropyLoss's default ignore_index).\"\"\"\n",
     "        # Use model body to get encoder representations\n",
     "        outputs = self.roberta(input_ids, attention_mask=attention_mask,\n",
     "                               token_type_ids=token_type_ids, **kwargs)\n",
     "        # Apply classifier to encoder representation (outputs[0] = last hidden state)\n",
     "        sequence_output = self.dropout(outputs[0])\n",
     "        logits = self.classifier(sequence_output)\n",
     "        # Calculate losses\n",
     "        loss = None\n",
     "        if labels is not None:\n",
     "            loss_fct = nn.CrossEntropyLoss()\n",
     "            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n",
     "        # Return model output object\n",
     "        return TokenClassifierOutput(loss=loss, logits=logits, \n",
     "                                     hidden_states=outputs.hidden_states, \n",
     "                                     attentions=outputs.attentions)"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:09.486288Z",
     "iopub.execute_input": "2022-08-13T11:49:09.486877Z",
     "iopub.status.idle": "2022-08-13T11:49:09.518279Z",
     "shell.execute_reply.started": "2022-08-13T11:49:09.486807Z",
     "shell.execute_reply": "2022-08-13T11:49:09.516949Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 49,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 加载模型"
   ],
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "source": [
    "from transformers import DataCollatorForTokenClassification\n",
    "import torch\n",
    "data_collator = DataCollatorForTokenClassification(xlmr_tokenizer)\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "def model_init():\n",
    "    return (XLMRobertaForTokenClassification\n",
    "            .from_pretrained(model_name, config=xlmr_config)\n",
    "            .to(device))"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:09.868104Z",
     "iopub.execute_input": "2022-08-13T11:49:09.869980Z",
     "iopub.status.idle": "2022-08-13T11:49:09.973337Z",
     "shell.execute_reply.started": "2022-08-13T11:49:09.869923Z",
     "shell.execute_reply": "2022-08-13T11:49:09.972315Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 50,
   "outputs": []
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 精调模型"
   ],
   "metadata": {
    "pycharm": {
     "name": "#%% md\n"
    }
   }
  },
  {
   "cell_type": "code",
   "source": [
    "# hide_output\n",
    "from transformers import TrainingArguments\n",
    "from transformers import AutoConfig\n",
    "\n",
    "num_epochs = 3\n",
    "batch_size = 24\n",
    "logging_steps = len(tokenized_datasets[\"train\"]) // batch_size\n",
    "model_name = xlmr_model_name\n",
    "training_args = TrainingArguments(\n",
    "    output_dir=model_name, log_level=\"error\", num_train_epochs=num_epochs,\n",
    "    per_device_train_batch_size=batch_size,\n",
    "    per_device_eval_batch_size=batch_size, evaluation_strategy=\"epoch\",\n",
    "    save_steps=1e6, weight_decay=0.01, disable_tqdm=False,\n",
    "    logging_steps=logging_steps, push_to_hub=False)"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:09.974527Z",
     "iopub.execute_input": "2022-08-13T11:49:09.975340Z",
     "iopub.status.idle": "2022-08-13T11:49:10.001058Z",
     "shell.execute_reply.started": "2022-08-13T11:49:09.975302Z",
     "shell.execute_reply": "2022-08-13T11:49:10.000001Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 51,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
     "# Hugging Face Hub login widget (only needed when pushing to the Hub).\n",
     "from huggingface_hub import notebook_login\n",
     "notebook_login()"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:10.003140Z",
     "iopub.execute_input": "2022-08-13T11:49:10.004065Z",
     "iopub.status.idle": "2022-08-13T11:49:10.009237Z",
     "shell.execute_reply.started": "2022-08-13T11:49:10.004013Z",
     "shell.execute_reply": "2022-08-13T11:49:10.007992Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 58,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
     "# Silence the tokenizers fork-parallelism warning from DataLoader workers.\n",
     "%env TOKENIZERS_PARALLELISM=false"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:10.011119Z",
     "iopub.execute_input": "2022-08-13T11:49:10.011889Z",
     "iopub.status.idle": "2022-08-13T11:49:10.024316Z",
     "shell.execute_reply.started": "2022-08-13T11:49:10.011838Z",
     "shell.execute_reply": "2022-08-13T11:49:10.022573Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 59,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
    "from transformers import Trainer\n",
    "\n",
    "trainer = Trainer(model_init=model_init, args=training_args, \n",
    "                  data_collator=data_collator, compute_metrics=compute_metrics,\n",
    "                  train_dataset=tokenized_datasets[\"train\"],\n",
    "                  eval_dataset=tokenized_datasets[\"valid\"], \n",
    "                  tokenizer=xlmr_tokenizer)"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:49:10.026848Z",
     "iopub.execute_input": "2022-08-13T11:49:10.028744Z",
     "iopub.status.idle": "2022-08-13T11:50:12.690332Z",
     "shell.execute_reply.started": "2022-08-13T11:49:10.027899Z",
     "shell.execute_reply": "2022-08-13T11:50:12.689060Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n",
     "is_executing": true
    }
   },
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "source": [
     "# Fine-tune; loss is logged per epoch and eval F1 comes from compute_metrics.\n",
     "trainer.train()\n"
   ],
   "metadata": {
    "execution": {
     "iopub.status.busy": "2022-08-13T11:50:12.692183Z",
     "iopub.execute_input": "2022-08-13T11:50:12.694938Z",
     "iopub.status.idle": "2022-08-13T11:50:13.160329Z",
     "shell.execute_reply.started": "2022-08-13T11:50:12.694876Z",
     "shell.execute_reply": "2022-08-13T11:50:13.158874Z"
    },
    "trusted": true,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "execution_count": 57,
   "outputs": [
    {
     "ename": "OSError",
     "evalue": "Error no file named ['pytorch_model.bin', 'tf_model.h5', 'model.ckpt.index', 'flax_model.msgpack'] found in directory xlm-roberta-base or `from_tf` and `from_flax` set to False.",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mOSError\u001B[0m                                   Traceback (most recent call last)",
      "\u001B[1;32m~\\AppData\\Local\\Temp/ipykernel_19128/3352579090.py\u001B[0m in \u001B[0;36m<module>\u001B[1;34m\u001B[0m\n\u001B[1;32m----> 1\u001B[1;33m \u001B[0mtrainer\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mtrain\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m      2\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32m~\\Anaconda3\\envs\\book\\lib\\site-packages\\transformers\\trainer.py\u001B[0m in \u001B[0;36mtrain\u001B[1;34m(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)\u001B[0m\n\u001B[0;32m   1064\u001B[0m             \u001B[1;31m# Seed must be set before instantiating the model when using model_init.\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m   1065\u001B[0m             \u001B[0mset_seed\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0margs\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mseed\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m-> 1066\u001B[1;33m             \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mmodel\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mcall_model_init\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mtrial\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m   1067\u001B[0m             \u001B[0mmodel_reloaded\u001B[0m \u001B[1;33m=\u001B[0m \u001B[1;32mTrue\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m   1068\u001B[0m             \u001B[1;31m# Reinitializes optimizer and scheduler\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32m~\\Anaconda3\\envs\\book\\lib\\site-packages\\transformers\\trainer.py\u001B[0m in \u001B[0;36mcall_model_init\u001B[1;34m(self, trial)\u001B[0m\n\u001B[0;32m    931\u001B[0m         \u001B[0mmodel_init_argcount\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mnumber_of_arguments\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mmodel_init\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m    932\u001B[0m         \u001B[1;32mif\u001B[0m \u001B[0mmodel_init_argcount\u001B[0m \u001B[1;33m==\u001B[0m \u001B[1;36m0\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m--> 933\u001B[1;33m             \u001B[0mmodel\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mmodel_init\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m    934\u001B[0m         \u001B[1;32melif\u001B[0m \u001B[0mmodel_init_argcount\u001B[0m \u001B[1;33m==\u001B[0m \u001B[1;36m1\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m    935\u001B[0m             \u001B[0mmodel\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mmodel_init\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mtrial\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32m~\\AppData\\Local\\Temp/ipykernel_19128/165744605.py\u001B[0m in \u001B[0;36mmodel_init\u001B[1;34m()\u001B[0m\n\u001B[0;32m      5\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m      6\u001B[0m \u001B[1;32mdef\u001B[0m \u001B[0mmodel_init\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m----> 7\u001B[1;33m     return (XLMRobertaForTokenClassification\n\u001B[0m\u001B[0;32m      8\u001B[0m             \u001B[1;33m.\u001B[0m\u001B[0mfrom_pretrained\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mmodel_name\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mconfig\u001B[0m\u001B[1;33m=\u001B[0m\u001B[0mxlmr_config\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m      9\u001B[0m             .to(device))\n",
      "\u001B[1;32m~\\Anaconda3\\envs\\book\\lib\\site-packages\\transformers\\modeling_utils.py\u001B[0m in \u001B[0;36mfrom_pretrained\u001B[1;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001B[0m\n\u001B[0;32m   1264\u001B[0m                     \u001B[0marchive_file\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mos\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mpath\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mjoin\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mpretrained_model_name_or_path\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mWEIGHTS_NAME\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m   1265\u001B[0m                 \u001B[1;32melse\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m-> 1266\u001B[1;33m                     raise EnvironmentError(\n\u001B[0m\u001B[0;32m   1267\u001B[0m                         \u001B[1;34mf\"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in \"\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m   1268\u001B[0m                         \u001B[1;34mf\"directory {pretrained_model_name_or_path} or `from_tf` and `from_flax` set to False.\"\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;31mOSError\u001B[0m: Error no file named ['pytorch_model.bin', 'tf_model.h5', 'model.ckpt.index', 'flax_model.msgpack'] found in directory xlm-roberta-base or `from_tf` and `from_flax` set to False."
     ]
    }
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ]
}