{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\ProgramData\\anaconda3\\envs\\pytorch\\lib\\site-packages\\transformers\\tokenization_utils_base.py:1601: FutureWarning: `clean_up_tokenization_spaces` was not set. It will be set to `True` by default. This behavior will be depracted in transformers v4.45, and will be then set to `False` by default. For more details check this issue: https://github.com/huggingface/transformers/issues/31884\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "# Load the tokenizer from the local rbt3 checkpoint directory.\n",
    "from transformers import AutoTokenizer\n",
    "model_path = r\"F:\\models\\rbt3\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
     ]
    },
    {
     "data": {
      "text/plain": "{'input_ids': [[101, 3209, 3299, 6163, 7652, 749, 872, 4638, 4970, 2094, 102], [101, 872, 6163, 7652, 749, 1166, 782, 4638, 3457, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}"
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Batch-encode two sentences. `truncation=True` alone was a no-op here:\n",
    "# this checkpoint defines no model_max_length, so the tokenizer warned\n",
    "# \"Default to no truncation\" (see stderr). An explicit max_length makes\n",
    "# truncation actually take effect.\n",
    "tokenizer.batch_encode_plus(\n",
    "    ['明月装饰了你的窗子', '你装饰了别人的梦'],\n",
    "    truncation=True,\n",
    "    max_length=512,\n",
    ")"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 添加镜像地址，同时下载模型，把模型保存到本机 ###"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "# One-time download: point HF_ENDPOINT at a mirror and cache the dataset\n",
    "# locally. Kept commented out so a fresh Run-All uses the saved local copy\n",
    "# loaded below instead of hitting the network.\n",
    "#import os\n",
    "#os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'\n",
    "#from datasets import load_dataset\n",
    "#dataset_path = r\"F:\\dataset\\ChnSentiCorp\"\n",
    "#dataset = load_dataset(\"lansinuote/ChnSentiCorp\", cache_dir=dataset_path)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "# One-time: persist the downloaded dataset so later runs can load_from_disk.\n",
    "#dataset.save_to_disk(r\"F:\\dataset\\ChnSentiCorp\\save\")"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 首先下载，然后第二次运行时可以直接导入本机模型 ###"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "outputs": [
    {
     "data": {
      "text/plain": "DatasetDict({\n    train: Dataset({\n        features: ['text', 'label'],\n        num_rows: 9600\n    })\n    validation: Dataset({\n        features: ['text', 'label'],\n        num_rows: 1200\n    })\n    test: Dataset({\n        features: ['text', 'label'],\n        num_rows: 1200\n    })\n})"
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load the previously saved ChnSentiCorp splits (train/validation/test)\n",
    "# from local disk — no network access needed on re-runs.\n",
    "from datasets import load_from_disk\n",
    "dataset = load_from_disk(r\"F:\\dataset\\ChnSentiCorp\\save\")\n",
    "dataset"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### shuffle是洗牌，然后select是选取，选取根据range产生的序列"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [
    {
     "data": {
      "text/plain": "DatasetDict({\n    train: Dataset({\n        features: ['text', 'label'],\n        num_rows: 2000\n    })\n    validation: Dataset({\n        features: ['text', 'label'],\n        num_rows: 1200\n    })\n    test: Dataset({\n        features: ['text', 'label'],\n        num_rows: 100\n    })\n})"
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Subsample for a quick experiment: 2000 train / 100 test examples.\n",
    "# A fixed shuffle seed makes the subsample reproducible across restarts\n",
    "# (the original unseeded shuffle picked different rows every run).\n",
    "dataset['train'] = dataset['train'].shuffle(seed=42).select(range(2000))\n",
    "dataset['test'] = dataset['test'].shuffle(seed=42).select(range(100))\n",
    "dataset"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "# Subsample validation to 200 examples, seeded for reproducibility\n",
    "# (consistent with the train/test subsampling above).\n",
    "dataset['validation'] = dataset['validation'].shuffle(seed=42).select(range(200))"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "outputs": [
    {
     "data": {
      "text/plain": "DatasetDict({\n    train: Dataset({\n        features: ['text', 'label'],\n        num_rows: 9600\n    })\n    validation: Dataset({\n        features: ['text', 'label'],\n        num_rows: 1200\n    })\n    test: Dataset({\n        features: ['text', 'label'],\n        num_rows: 1200\n    })\n})"
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the current splits and their row counts.\n",
    "dataset"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 使用tokenizer把文字编码,同时删除text项，map函数的作用是批量修改，batch_encode_plus函数使数据符合bert的输入要求"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "outputs": [
    {
     "data": {
      "text/plain": "DatasetDict({\n    train: Dataset({\n        features: ['label', 'input_ids', 'token_type_ids', 'attention_mask'],\n        num_rows: 9600\n    })\n    validation: Dataset({\n        features: ['label', 'input_ids', 'token_type_ids', 'attention_mask'],\n        num_rows: 1200\n    })\n    test: Dataset({\n        features: ['label', 'input_ids', 'token_type_ids', 'attention_mask'],\n        num_rows: 1200\n    })\n})"
     },
     "execution_count": 57,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Tokenize every split in batches of 1000 and drop the raw 'text' column so\n",
    "# only model inputs (input_ids / token_type_ids / attention_mask) plus the\n",
    "# label remain for the collator.\n",
    "# NOTE(review): truncation=True has no effect here because no max_length is\n",
    "# set (see the earlier \"Default to no truncation\" warning); over-long\n",
    "# examples are removed by the filter cell below instead.\n",
    "dataset = dataset.map(\n",
    "    lambda x: tokenizer.batch_encode_plus(x['text'], truncation=True),\n",
    "    batched=True,\n",
    "    batch_size=1000,\n",
    "    num_proc=1,\n",
    "    remove_columns=['text']\n",
    ")\n",
    "dataset"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "outputs": [
    {
     "data": {
      "text/plain": "DatasetDict({\n    train: Dataset({\n        features: ['label', 'input_ids', 'token_type_ids', 'attention_mask'],\n        num_rows: 9491\n    })\n    validation: Dataset({\n        features: ['label', 'input_ids', 'token_type_ids', 'attention_mask'],\n        num_rows: 1190\n    })\n    test: Dataset({\n        features: ['label', 'input_ids', 'token_type_ids', 'attention_mask'],\n        num_rows: 1188\n    })\n})"
     },
     "execution_count": 58,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Keep only examples whose input_ids fit BERT's 512 position-embedding limit.\n",
    "# The predicate runs per example (`batched` defaults to False), so the\n",
    "# previous `batch_size=1000` argument had no effect and is dropped.\n",
    "dataset = dataset.filter(\n",
    "    lambda data: len(data['input_ids']) <= 512,\n",
    "    num_proc=1\n",
    ")\n",
    "dataset"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 导入模型，使用transformers的序列分类"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "loading configuration file F:\\models\\rbt3\\config.json\n",
      "Model config BertConfig {\n",
      "  \"_name_or_path\": \"F:\\\\models\\\\rbt3\",\n",
      "  \"architectures\": [\n",
      "    \"BertForMaskedLM\"\n",
      "  ],\n",
      "  \"attention_probs_dropout_prob\": 0.1,\n",
      "  \"classifier_dropout\": null,\n",
      "  \"directionality\": \"bidi\",\n",
      "  \"hidden_act\": \"gelu\",\n",
      "  \"hidden_dropout_prob\": 0.1,\n",
      "  \"hidden_size\": 768,\n",
      "  \"initializer_range\": 0.02,\n",
      "  \"intermediate_size\": 3072,\n",
      "  \"layer_norm_eps\": 1e-12,\n",
      "  \"max_position_embeddings\": 512,\n",
      "  \"model_type\": \"bert\",\n",
      "  \"num_attention_heads\": 12,\n",
      "  \"num_hidden_layers\": 3,\n",
      "  \"output_past\": true,\n",
      "  \"pad_token_id\": 0,\n",
      "  \"pooler_fc_size\": 768,\n",
      "  \"pooler_num_attention_heads\": 12,\n",
      "  \"pooler_num_fc_layers\": 3,\n",
      "  \"pooler_size_per_head\": 128,\n",
      "  \"pooler_type\": \"first_token_transform\",\n",
      "  \"position_embedding_type\": \"absolute\",\n",
      "  \"transformers_version\": \"4.44.2\",\n",
      "  \"type_vocab_size\": 2,\n",
      "  \"use_cache\": true,\n",
      "  \"vocab_size\": 21128\n",
      "}\n",
      "\n",
      "loading weights file F:\\models\\rbt3\\pytorch_model.bin\n",
      "Some weights of the model checkpoint at F:\\models\\rbt3 were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']\n",
      "- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at F:\\models\\rbt3 and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "data": {
      "text/plain": "BertForSequenceClassification(\n  (bert): BertModel(\n    (embeddings): BertEmbeddings(\n      (word_embeddings): Embedding(21128, 768, padding_idx=0)\n      (position_embeddings): Embedding(512, 768)\n      (token_type_embeddings): Embedding(2, 768)\n      (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n      (dropout): Dropout(p=0.1, inplace=False)\n    )\n    (encoder): BertEncoder(\n      (layer): ModuleList(\n        (0-2): 3 x BertLayer(\n          (attention): BertAttention(\n            (self): BertSdpaSelfAttention(\n              (query): Linear(in_features=768, out_features=768, bias=True)\n              (key): Linear(in_features=768, out_features=768, bias=True)\n              (value): Linear(in_features=768, out_features=768, bias=True)\n              (dropout): Dropout(p=0.1, inplace=False)\n            )\n            (output): BertSelfOutput(\n              (dense): Linear(in_features=768, out_features=768, bias=True)\n              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n              (dropout): Dropout(p=0.1, inplace=False)\n            )\n          )\n          (intermediate): BertIntermediate(\n            (dense): Linear(in_features=768, out_features=3072, bias=True)\n            (intermediate_act_fn): GELUActivation()\n          )\n          (output): BertOutput(\n            (dense): Linear(in_features=3072, out_features=768, bias=True)\n            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n            (dropout): Dropout(p=0.1, inplace=False)\n          )\n        )\n      )\n    )\n    (pooler): BertPooler(\n      (dense): Linear(in_features=768, out_features=768, bias=True)\n      (activation): Tanh()\n    )\n  )\n  (dropout): Dropout(p=0.1, inplace=False)\n  (classifier): Linear(in_features=768, out_features=2, bias=True)\n)"
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load rbt3 with a fresh 2-way classification head. The MLM head weights in\n",
    "# the checkpoint are discarded and the classifier is randomly initialized\n",
    "# (see the stderr warnings above), so the model must be fine-tuned.\n",
    "from transformers import AutoModelForSequenceClassification\n",
    "import torch\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "model = AutoModelForSequenceClassification.from_pretrained(r'F:\\models\\rbt3', num_labels=2)\n",
    "# Force every parameter tensor to be contiguous — presumably to avoid\n",
    "# non-contiguous-tensor errors when Trainer saves safetensors checkpoints;\n",
    "# TODO confirm this is still required.\n",
    "for param in model.parameters(): param.data = param.data.contiguous()\n",
    "model.to(device)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "outputs": [
    {
     "data": {
      "text/plain": "(tensor(1.8792, device='cuda:0', grad_fn=<NllLossBackward0>),\n torch.Size([4, 2]))"
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Smoke-test the forward pass with a dummy (4, 10) batch: expect a scalar\n",
    "# loss and logits of shape (4, 2). Use the `device` chosen above instead of\n",
    "# hard-coded .cuda() so this cell also runs on CPU-only machines.\n",
    "data = {\n",
    "    'input_ids': torch.ones(4, 10, dtype=torch.long).to(device),\n",
    "    'token_type_ids': torch.ones(4, 10, dtype=torch.long).to(device),\n",
    "    'attention_mask': torch.ones(4, 10, dtype=torch.long).to(device),\n",
    "    'labels': torch.ones(4, dtype=torch.long).to(device)\n",
    "}\n",
    "out = model(**data)\n",
    "out['loss'], out['logits'].shape"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "### 加载评估指标，定义 compute_metrics 评估函数 ###"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "outputs": [],
   "source": [
    "# Load the accuracy metric from a local copy of the evaluate scripts.\n",
    "import evaluate\n",
    "accuracy = evaluate.load(r\"F:\\evaluate\\metrics\\accuracy\")\n",
    "def compute_metrics(eval_pred):\n",
    "    \"\"\"Convert Trainer's (predictions, labels) pair into {'accuracy': ...}.\"\"\"\n",
    "    predictions, labels = eval_pred\n",
    "    # argmax over the class axis turns raw logits into predicted class ids\n",
    "    pred_ids = predictions.argmax(axis=1)\n",
    "    return accuracy.compute(predictions=pred_ids, references=labels)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "outputs": [
    {
     "data": {
      "text/plain": "{'accuracy': 0.75}"
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity-check compute_metrics on a hand-made EvalPrediction: argmax picks\n",
    "# class 1 for every row, labels are [1, 1, 0, 1], so expected accuracy = 0.75.\n",
    "from transformers import EvalPrediction\n",
    "import numpy as np\n",
    "eval_pred = EvalPrediction(\n",
    "    predictions=np.array([[0, 1], [2, 3], [4, 5], [6, 7]]),\n",
    "    label_ids=np.array([1, 1, 0, 1])\n",
    ")\n",
    "compute_metrics(eval_pred)"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "PyTorch: setting up devices\n",
      "The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).\n"
     ]
    }
   ],
   "source": [
    "# Training configuration: evaluate and checkpoint every 30 steps,\n",
    "# 2 epochs, batch size 32 per device.\n",
    "# NOTE(review): `evaluation_strategy` is renamed to `eval_strategy` in newer\n",
    "# transformers releases — confirm before upgrading.\n",
    "from transformers import TrainingArguments\n",
    "args = TrainingArguments(\n",
    "    output_dir=\"output_dir\",\n",
    "    evaluation_strategy='steps',\n",
    "    eval_steps=30,\n",
    "    save_strategy='steps',\n",
    "    save_steps=30,\n",
    "    num_train_epochs=2,\n",
    "    learning_rate=1e-4,\n",
    "    weight_decay=1e-2,\n",
    "    per_device_eval_batch_size=32,\n",
    "    per_device_train_batch_size=32,\n",
    "    # keep input_ids/token_type_ids/attention_mask even though the model\n",
    "    # signature inspection might otherwise drop columns\n",
    "    remove_unused_columns=False\n",
    ")"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "outputs": [],
   "source": [
    "from transformers import Trainer\n",
    "from transformers.data.data_collator import DataCollatorWithPadding\n",
    "# DataCollatorWithPadding pads each batch to the length of its longest\n",
    "# sequence using the tokenizer's pad token.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=args,\n",
    "    train_dataset=dataset['train'],\n",
    "    eval_dataset=dataset['validation'],\n",
    "    compute_metrics=compute_metrics,\n",
    "    data_collator=DataCollatorWithPadding(tokenizer)\n",
    ")"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 198\n",
      "  Batch size = 64\n",
      "D:\\ProgramData\\anaconda3\\envs\\pytorch\\lib\\site-packages\\transformers\\models\\bert\\modeling_bert.py:439: UserWarning: 1Torch was not compiled with flash attention. (Triggered internally at C:\\actions-runner\\_work\\pytorch\\pytorch\\builder\\windows\\pytorch\\aten\\src\\ATen\\native\\transformers\\cuda\\sdp_utils.cpp:455.)\n",
      "  attn_output = torch.nn.functional.scaled_dot_product_attention(\n"
     ]
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "\n    <div>\n      \n      <progress value='1' max='4' style='width:300px; height:20px; vertical-align: middle;'></progress>\n      [1/4 : < :]\n    </div>\n    "
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "{'eval_loss': 0.6882948279380798,\n 'eval_model_preparation_time': 0.001,\n 'eval_accuracy': 0.5353535353535354,\n 'eval_runtime': 1.2252,\n 'eval_samples_per_second': 161.609,\n 'eval_steps_per_second': 3.265}"
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Baseline evaluation before fine-tuning — the classifier head is still\n",
    "# randomly initialized, so accuracy should be near chance.\n",
    "trainer.evaluate()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "***** Running training *****\n",
      "  Num examples = 9,491\n",
      "  Num Epochs = 2\n",
      "  Instantaneous batch size per device = 32\n",
      "  Total train batch size (w. parallel, distributed & accumulation) = 32\n",
      "  Gradient Accumulation steps = 1\n",
      "  Total optimization steps = 594\n",
      "  Number of trainable parameters = 38,478,338\n"
     ]
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "\n    <div>\n      \n      <progress value='2' max='594' style='width:300px; height:20px; vertical-align: middle;'></progress>\n      [  2/594 : < :, Epoch 0.00/2]\n    </div>\n    <table border=\"1\" class=\"dataframe\">\n  <thead>\n <tr style=\"text-align: left;\">\n      <th>Step</th>\n      <th>Training Loss</th>\n      <th>Validation Loss</th>\n    </tr>\n  </thead>\n  <tbody>\n  </tbody>\n</table><p>"
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-30\n",
      "Configuration saved in output_dir\\checkpoint-30\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-30\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-60\n",
      "Configuration saved in output_dir\\checkpoint-60\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-60\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-90\n",
      "Configuration saved in output_dir\\checkpoint-90\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-90\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-120\n",
      "Configuration saved in output_dir\\checkpoint-120\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-120\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-150\n",
      "Configuration saved in output_dir\\checkpoint-150\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-150\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-180\n",
      "Configuration saved in output_dir\\checkpoint-180\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-180\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-210\n",
      "Configuration saved in output_dir\\checkpoint-210\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-210\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-240\n",
      "Configuration saved in output_dir\\checkpoint-240\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-240\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-270\n",
      "Configuration saved in output_dir\\checkpoint-270\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-270\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-300\n",
      "Configuration saved in output_dir\\checkpoint-300\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-300\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-330\n",
      "Configuration saved in output_dir\\checkpoint-330\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-330\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-360\n",
      "Configuration saved in output_dir\\checkpoint-360\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-360\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-390\n",
      "Configuration saved in output_dir\\checkpoint-390\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-390\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-420\n",
      "Configuration saved in output_dir\\checkpoint-420\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-420\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-450\n",
      "Configuration saved in output_dir\\checkpoint-450\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-450\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-480\n",
      "Configuration saved in output_dir\\checkpoint-480\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-480\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-510\n",
      "Configuration saved in output_dir\\checkpoint-510\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-510\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-540\n",
      "Configuration saved in output_dir\\checkpoint-540\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-540\\model.safetensors\n",
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n",
      "Saving model checkpoint to output_dir\\checkpoint-570\n",
      "Configuration saved in output_dir\\checkpoint-570\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-570\\model.safetensors\n",
      "Saving model checkpoint to output_dir\\checkpoint-594\n",
      "Configuration saved in output_dir\\checkpoint-594\\config.json\n",
      "Model weights saved in output_dir\\checkpoint-594\\model.safetensors\n",
      "\n",
      "\n",
      "Training completed. Do not forget to share your model on huggingface.co/models =)\n",
      "\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": "TrainOutput(global_step=594, training_loss=0.21973796645398894, metrics={'train_runtime': 238.4713, 'train_samples_per_second': 79.599, 'train_steps_per_second': 2.491, 'total_flos': 781872790237860.0, 'train_loss': 0.21973796645398894, 'epoch': 2.0})"
     },
     "execution_count": 63,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Fine-tune for 2 epochs; checkpoints land in output_dir/ every 30 steps.\n",
    "trainer.train()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n",
      "***** Running Evaluation *****\n",
      "  Num examples = 1190\n",
      "  Batch size = 32\n"
     ]
    },
    {
     "data": {
      "text/plain": "<IPython.core.display.HTML object>",
      "text/html": "\n    <div>\n      \n      <progress value='1' max='38' style='width:300px; height:20px; vertical-align: middle;'></progress>\n      [ 1/38 : < :]\n    </div>\n    "
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": "{'eval_loss': 0.22638437151908875,\n 'eval_accuracy': 0.9369747899159664,\n 'eval_runtime': 4.0351,\n 'eval_samples_per_second': 294.912,\n 'eval_steps_per_second': 9.417,\n 'epoch': 2.0}"
     },
     "execution_count": 64,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Re-evaluate on the validation split after fine-tuning.\n",
    "trainer.evaluate()"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "這 間 酒 店 環 境 和 服 務 態 度 亦 算 不 錯, 但 房 間 空 間 太 小 ~ ~ 不 宣 容 納 太 大 件 行 李 ~ ~ 且 房 間 格 調 還 可 以 ~ ~ 中 餐 廳 的 廣 東 點 心 不 太 好 吃 ~ ~ 要 改 善 之 ~ ~ ~ ~ 但 算 價 錢 平 宜 ~ ~ 可 接 受 ~ ~ 西 餐 廳 格 調 都 很 好 ~ ~ 但 吃 的 味 道 一 般 且 令 人 等 得 太 耐 了 ~ ~ 要 改 善 之 ~ ~\n",
      "label= 1\n",
      "predict= 1\n",
      "< 荐 书 > 推 荐 所 有 喜 欢 < 红 楼 > 的 红 迷 们 一 定 要 收 藏 这 本 书, 要 知 道 当 年 我 听 说 这 本 书 的 时 候 花 很 长 时 间 去 图 书 馆 找 和 借 都 没 能 如 愿, 所 以 这 次 一 看 到 当 当 有, 马 上 买 了, 红 迷 们 也 要 记 得 备 货 哦!\n",
      "label= 1\n",
      "predict= 1\n",
      "商 品 的 不 足 暂 时 还 没 发 现 ， 京 东 的 订 单 处 理 速 度 实 在....... 周 二 就 打 包 完 成 ， 周 五 才 发 货...\n",
      "label= 0\n",
      "predict= 0\n",
      "２００１ 年 来 福 州 就 住 在 这 里 ， 这 次 感 觉 房 间 就 了 点 ， 温 泉 水 还 是 有 的 ． 总 的 来 说 很 满 意 ． 早 餐 简 单 了 些 ．\n",
      "label= 1\n",
      "predict= 1\n",
      "不 错 的 上 网 本 ， 外 形 很 漂 亮 ， 操 作 系 统 应 该 是 个 很 大 的 卖 点 ， 电 池 还 可 以 。 整 体 上 讲 ， 作 为 一 个 上 网 本 的 定 位 ， 还 是 不 错 的 。\n",
      "label= 1\n",
      "predict= 1\n",
      "房 间 地 毯 太 脏 ， 临 近 火 车 站 十 分 吵 闹 ， 还 好 是 双 层 玻 璃 。 服 务 一 般 ， 酒 店 门 口 的 taxi 讲 是 酒 店 的 长 期 合 作 关 系 ， 每 月 要 交 费 给 酒 店 。 从 酒 店 到 机 场 讲 得 是 打 表 147 元 ， 到 了 后 非 要 200 元 ， 可 能 被 小 宰 30 - 40 元 。\n",
      "label= 0\n",
      "predict= 0\n",
      "本 来 想 没 事 的 时 候 翻 翻 ， 可 惜 看 不 下 去 ， 还 是 和 张 没 法 比 ， 他 的 书 能 畅 销 大 部 分 还 是 受 张 的 影 响 ， 对 这 个 男 人 实 在 是 没 好 感 ， 不 知 道 怎 么 买 的 ， 后 悔\n",
      "label= 0\n",
      "predict= 0\n",
      "这 台 机 外 观 十 分 好, 本 人 喜 欢, 性 能 不 错, 是 led 显 示 屏, 无 线 网 卡 是 : 5100agn 无 线 网 卡, 如 果 装 的 是 一 条 2g 800mhz 的 内 存 就 无 敌 了, 本 本 发 热 很 小, 总 体 来 说 是 十 分 值 得 买 的, 前 提 是 这 台 机 是 4299 买 的.\n",
      "label= 1\n",
      "predict= 1\n"
     ]
    }
   ],
   "source": [
    "# Qualitative check: decode the first eval batch and compare predictions\n",
    "# against the gold labels.\n",
    "model.eval()\n",
    "data = next(iter(trainer.get_eval_dataloader()))\n",
    "with torch.no_grad():  # inference only — skip autograd bookkeeping\n",
    "    preds = model(**data)['logits'].argmax(dim=1).cpu().numpy()\n",
    "for i in range(8):\n",
    "    print(tokenizer.decode(data['input_ids'][i], skip_special_tokens=True))\n",
    "    print(\"label=\", data['labels'][i].item())\n",
    "    print(\"predict=\", preds[i].item())"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [],
   "metadata": {
    "collapsed": false
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "name": "pytorch",
   "language": "python",
   "display_name": "pytorch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
