{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "dda1072a-f617-48f0-9c87-0c6aeb093db8",
   "metadata": {},
   "source": [
    "import torch\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.optim.adamw import AdamW\n",
    "import numpy as np\n",
    "from transformers import (\n",
    "    AutoTokenizer,\n",
    "    AutoModelForSequenceClassification,\n",
    "    Trainer,\n",
    "    DataCollatorWithPadding,\n",
    "    TrainingArguments)\n",
    "from datasets import load_dataset\n",
    "import platform"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "f5b74124-27a2-4343-9c51-5dcd303a11dc",
   "metadata": {},
   "source": [
    "# If you hit \"ValueError: Invalid pattern: '**' can only be an entire path component\", upgrade datasets:\n",
    "# pip install -U datasets\n",
    "# HF_HOME can be set to customize the cache directory.\n",
    "# Local snapshots of the model and the GLUE/MRPC data, chosen per OS.\n",
    "if platform.system() == 'Windows':\n",
    "    model_path = 'E:/ai/huggingface-models/distilbert-base-uncased-finetuned-sst-2-english/'\n",
    "    data_path = \"E:/ai/data/glue/mrpc\"\n",
    "else:\n",
    "    model_path = '/home/will/huggingface-models/distilbert-base-uncased-finetuned-sst-2-english/'\n",
    "    data_path = '/home/will/glue/mrpc'\n",
    "\n",
    "# Hub model id of the same fine-tuned checkpoint as model_path.\n",
    "checkpoint = \"distilbert-base-uncased-finetuned-sst-2-english\""
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "3fd122e0-d4e2-403a-9c42-14da26edcc6a",
   "metadata": {},
   "source": [
    "raw_dataset = load_dataset(path=data_path)\n",
    "# Load the tokenizer from the same local snapshot as the model, so the\n",
    "# notebook works fully offline (previously the tokenizer was fetched from\n",
    "# the Hub via `checkpoint`, while the model weights came from disk).\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
    "model = AutoModelForSequenceClassification.from_pretrained(model_path, num_labels=2)\n",
    "optimizer = AdamW(model.parameters(), lr=5e-5)\n",
    "\n",
    "def tokenize_function(example):\n",
    "    \"\"\"Tokenize an MRPC sentence pair; padding is deferred to the collator.\"\"\"\n",
    "    return tokenizer(example['sentence1'], example['sentence2'], truncation=True)\n",
    "\n",
    "tokenized_dataset = raw_dataset.map(tokenize_function, batched=True)\n",
    "# Pads each batch dynamically to its longest sequence at collate time.\n",
    "data_collator = DataCollatorWithPadding(tokenizer)\n",
    "\n",
    "# Keep only columns the model's forward() accepts; rename 'label' -> 'labels'.\n",
    "tokenized_dataset = tokenized_dataset.remove_columns(['sentence1', 'sentence2', 'idx'])\n",
    "tokenized_dataset = tokenized_dataset.rename_column('label', 'labels')\n",
    "tokenized_dataset.set_format('torch')\n",
    "tokenized_dataset['train'].column_names"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "ed3d8917-3673-478e-99cb-80fc0243463d",
   "metadata": {},
   "source": [
    "# Batches of 8 with dynamic padding; only the training split is shuffled.\n",
    "train_dataloader = DataLoader(tokenized_dataset['train'],\n",
    "                              shuffle=True,\n",
    "                              batch_size=8,\n",
    "                              collate_fn=data_collator)\n",
    "eval_dataloader = DataLoader(tokenized_dataset['validation'],\n",
    "                             batch_size=8,\n",
    "                             collate_fn=data_collator)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e8f3da2f-faff-4ff5-bb50-b9d1f12ebfef",
   "metadata": {},
   "source": [
    "# Grab one training batch to sanity-check the collated tensor shapes.\n",
    "batch = next(iter(train_dataloader))\n",
    "{key: tensor.shape for key, tensor in batch.items()}"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "4e227627-f6d4-4172-9b4f-067299088826",
   "metadata": {},
   "source": [
    "# Forward pass with labels included, so the model also returns a loss.\n",
    "output = model(**batch)\n",
    "print(output.loss, output.logits.shape)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "35b8c534-5d1f-4b40-bc4f-3c2b87e8d585",
   "metadata": {},
   "source": [
    "from transformers import get_scheduler\n",
    "\n",
    "# Linear decay from the initial LR down to 0 over the full run, no warmup.\n",
    "num_epochs = 3\n",
    "# NOTE(review): len(train_dataloader) is measured before accelerator.prepare();\n",
    "# under a multi-process launch the prepared loader can be shorter per process —\n",
    "# confirm before running this notebook distributed.\n",
    "num_training_steps = num_epochs * len(train_dataloader)\n",
    "lr_scheduler = get_scheduler(\n",
    "    'linear',\n",
    "    optimizer=optimizer,\n",
    "    num_warmup_steps=0,\n",
    "    num_training_steps=num_training_steps,\n",
    ")\n",
    "print(num_training_steps)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "ac807d6c-07f7-4ff7-9b7c-441f2be17425",
   "metadata": {},
   "source": [
    "# Manual device pick; the eval cell below moves batches to this device explicitly.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "model.to(device)\n",
    "device"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "7e4dbc94-6c96-4ac9-92ec-5ffff1bf012c",
   "metadata": {},
   "source": [
    "from tqdm.auto import tqdm\n",
    "from accelerate import Accelerator\n",
    "\n",
    "accelerator = Accelerator()\n",
    "\n",
    "# Hand everything Accelerate manages to prepare() — including the LR\n",
    "# scheduler, as the Accelerate docs recommend, so scheduler stepping stays\n",
    "# correct under distributed launches. Single-process behavior is unchanged.\n",
    "train_dataloader, eval_dataloader, model, optimizer, lr_scheduler = accelerator.prepare(\n",
    "    train_dataloader, eval_dataloader, model, optimizer, lr_scheduler\n",
    ")\n",
    "\n",
    "progress_bar = tqdm(range(num_training_steps))\n",
    "\n",
    "model.train()\n",
    "for epoch in range(num_epochs):\n",
    "    for batch in train_dataloader:\n",
    "        # No manual batch.to(device): prepared dataloaders already place\n",
    "        # tensors on the accelerator's device.\n",
    "        outputs = model(**batch)\n",
    "        loss = outputs.loss\n",
    "        # accelerator.backward() replaces loss.backward(): it handles loss\n",
    "        # scaling / gradient synchronization when applicable.\n",
    "        accelerator.backward(loss)\n",
    "\n",
    "        optimizer.step()\n",
    "        lr_scheduler.step()\n",
    "        optimizer.zero_grad()\n",
    "        progress_bar.update(1)"
   ],
   "outputs": []
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "897685e8-9ab7-4b4d-8d20-da6e18b63ceb",
   "metadata": {},
   "source": [
    "import evaluate\n",
    "\n",
    "# GLUE/MRPC metric reports accuracy and F1.\n",
    "metric = evaluate.load('glue', 'mrpc')\n",
    "model.eval()\n",
    "\n",
    "for batch in eval_dataloader:\n",
    "    # NOTE(review): after accelerator.prepare() eval batches already sit on the\n",
    "    # accelerator's device, so this .to(device) is redundant on a single\n",
    "    # machine — confirm `device` matches accelerator.device before multi-GPU use.\n",
    "    batch = {k: v.to(device) for k,v in batch.items()}\n",
    "    with torch.no_grad():\n",
    "        outputs = model(**batch)\n",
    "    logits = outputs.logits\n",
    "    predictions = torch.argmax(logits, dim=-1)\n",
    "    metric.add_batch(predictions=predictions, references=batch['labels'])\n",
    "# first try:{'accuracy': 0.7181372549019608, 'f1': 0.8244274809160306}\n",
    "metric.compute()"
   ],
   "outputs": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
