{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports: stdlib -> third-party -> local (duplicate `import pandas as pd` removed).\n",
    "import sys\n",
    "import time\n",
    "\n",
    "import pandas as pd\n",
    "import torch\n",
    "import wandb\n",
    "from torch import nn\n",
    "from torch.optim import Adam\n",
    "from torch.utils.data import DataLoader, Dataset, random_split\n",
    "from tqdm import tqdm\n",
    "from transformers import AutoTokenizer, BertForSequenceClassification, BertModel\n",
    "\n",
    "sys.path.append('..')  # make the project root importable for the local modules below\n",
    "from settings import BERT_PATH\n",
    "from static_var import bert_cls_path, BERT_train_file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "('bert_cls.pth', '../data/bert_train_dataset.csv')"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bert_cls_path, BERT_train_file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'../data/bert_train_dataset.csv'"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "BERT_train_file"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select GPU when available; everything is moved to this device later.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "config = {\n",
    "    \"batch\": 16,  # mini-batch size\n",
    "    \"epochs\": 8,  # number of training epochs\n",
    "    \"device\": device,\n",
    "    \"dataset\": BERT_train_file,  # CSV with \"text\" and \"label\" columns (see CSVDataset below)\n",
    "    \"model_path\": BERT_PATH,  # path of the pretrained BERT checkpoint\n",
    "    \"new_model\": \"wuhan_cls.pth\",  # where the best checkpoint is saved during training\n",
    "    \"output\": 88,  # number of target classes for the classification head\n",
    "    \"warmup_steps\": 500,  # NOTE(review): declared but unused — the scheduler cell hard-codes 2900; reconcile\n",
    "}\n",
    "device = config[\"device\"]\n",
    "tokenizer = AutoTokenizer.from_pretrained(config[\"model_path\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CSVDataset(Dataset):\n",
    "    \"\"\"Dataset over a CSV file yielding (raw text, label tensor) pairs.\n",
    "\n",
    "    Tokenization is deliberately deferred to the DataLoader's collate_fn so\n",
    "    that each batch is padded/truncated together.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, csv_file, feature_column, target_column, tokenizer):\n",
    "        self.data = pd.read_csv(csv_file)\n",
    "        self.target_column = target_column\n",
    "        self.feature_column = feature_column\n",
    "        self.tokenizer = tokenizer  # kept for callers; not used by __getitem__ itself\n",
    "        self.targets = self.data[target_column].values\n",
    "        self.data = self.data.drop(columns=[target_column])\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        if torch.is_tensor(idx):\n",
    "            idx = idx.tolist()\n",
    "\n",
    "        # Positional indexing (.iloc) matches self.targets[idx]; the original\n",
    "        # .loc[idx, ...] would silently mis-align if the CSV ever yields a\n",
    "        # non-default index.\n",
    "        sample = self.data[self.feature_column].iloc[idx]\n",
    "        # Classification labels must be int64 for the cross-entropy loss.\n",
    "        target = torch.tensor(self.targets[idx], dtype=torch.long)\n",
    "        return sample, target\n",
    "\n",
    "\n",
    "csv_file = config[\"dataset\"]\n",
    "feature_column = \"text\"\n",
    "target_column = \"label\"\n",
    "dataset = CSVDataset(csv_file, feature_column, target_column, tokenizer)\n",
    "train_dataset = dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def collate_fn(item):\n",
    "    \"\"\"Batch (text, label) pairs: tokenize all texts together, stack labels.\n",
    "\n",
    "    Uses the notebook-global `tokenizer`; every sequence is padded/truncated\n",
    "    to exactly 512 tokens, so input_ids comes out as (batch, 512).\n",
    "    \"\"\"\n",
    "    feature, label = zip(*item)\n",
    "    feature = tokenizer(\n",
    "        feature,\n",
    "        padding=\"max_length\",\n",
    "        max_length=512,\n",
    "        truncation=True,\n",
    "        return_tensors=\"pt\",\n",
    "    )\n",
    "    label = torch.stack(label)\n",
    "    return feature, label\n",
    "\n",
    "\n",
    "train_dataloader = DataLoader(\n",
    "    train_dataset,\n",
    "    batch_size=config[\"batch\"],\n",
    "    shuffle=True,\n",
    "    collate_fn=collate_fn,\n",
    "    pin_memory=True,  # speeds up host->GPU transfers\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([16, 512])\n",
      "torch.Size([16])\n"
     ]
    }
   ],
   "source": [
    "# Sanity check one batch: input_ids should be (16, 512), labels (16,).\n",
    "for feature, label in train_dataloader:\n",
    "    print(feature[\"input_ids\"].shape)\n",
    "    print(label.shape)\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = BertForSequenceClassification.from_pretrained(\n",
    "    config[\"model_path\"],  # pretrained BERT weights (path from settings)\n",
    "    num_labels=config[\"output\"],  # 88-way classification head\n",
    "    output_attentions=False,  # Whether the model returns attentions weights.\n",
    "    output_hidden_states=False,  # Whether the model returns all hidden-states.\n",
    ")\n",
    "# Use the configured device instead of hard-coded .cuda(): the config cell\n",
    "# already falls back to \"cpu\", but .cuda() would crash on CPU-only machines.\n",
    "model.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "3646"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(train_dataloader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "8"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "config[\"epochs\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "29168"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(train_dataloader) * config[\"epochs\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/jie/anaconda3/envs/llm/lib/python3.10/site-packages/transformers/optimization.py:521: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "from transformers import get_linear_schedule_with_warmup\n",
    "\n",
    "# torch.optim.AdamW replaces the deprecated transformers.AdamW\n",
    "# (the latter emits the FutureWarning seen above and is scheduled for removal).\n",
    "optimizer = torch.optim.AdamW(\n",
    "    model.parameters(),\n",
    "    lr=2e-5,  # args.learning_rate - default is 5e-5, our notebook had 2e-5\n",
    "    eps=1e-8,  # args.adam_epsilon  - default is 1e-8.\n",
    ")\n",
    "\n",
    "# One scheduler step per batch, for every epoch.\n",
    "# Fix: this previously multiplied by config[\"batch\"] (16) instead of\n",
    "# config[\"epochs\"] (8), doubling the schedule length so the LR never\n",
    "# decayed to zero; the exploratory cell above computes epochs * batches.\n",
    "total_steps = len(train_dataloader) * config[\"epochs\"]\n",
    "\n",
    "# Linear warmup then linear decay. 2900 is ~10% of total_steps.\n",
    "# TODO(review): config[\"warmup_steps\"] (500) is unused — reconcile the two.\n",
    "scheduler = get_linear_schedule_with_warmup(\n",
    "    optimizer,\n",
    "    num_warmup_steps=2900,\n",
    "    num_training_steps=total_steps,\n",
    ")\n",
    "\n",
    "\n",
    "def run_one_epoch(\n",
    "    model,\n",
    "    dataloader,\n",
    "    is_train,\n",
    "    optimizer,\n",
    "    scheduler,\n",
    "    wandb,\n",
    "    batch=config[\"batch\"],  # retained for backward compatibility; no longer used\n",
    "):\n",
    "    \"\"\"Run one pass over `dataloader`.\n",
    "\n",
    "    When is_train is True the model is updated (backward, grad clip, step);\n",
    "    otherwise it runs under torch.no_grad(). Returns (mean loss, accuracy),\n",
    "    both averaged over the true dataset size.\n",
    "    \"\"\"\n",
    "\n",
    "    def run():\n",
    "        start = time.time()\n",
    "        total_loss = 0\n",
    "        total_acc = 0\n",
    "        # Fix: len(dataloader) * batch over-counts when the final batch is\n",
    "        # partial, deflating the reported loss/accuracy averages.\n",
    "        nums = len(dataloader.dataset)\n",
    "        for feature, label in tqdm(dataloader):\n",
    "            label = label.to(device)\n",
    "            if is_train:\n",
    "                optimizer.zero_grad()\n",
    "            input = {k: v.squeeze(1).to(device) for k, v in feature.items()}\n",
    "            output = model(**input, labels=label)\n",
    "            loss = output[\"loss\"]\n",
    "            logits = output[\"logits\"]\n",
    "            if is_train:\n",
    "                loss.backward()\n",
    "                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n",
    "                optimizer.step()\n",
    "                scheduler.step()\n",
    "            total_loss += loss.item()\n",
    "            acc = (logits.argmax(dim=-1) == label.view(-1)).sum().item()\n",
    "            wandb.log(\n",
    "                {\"acc\": acc, \"loss\": loss.item(), \"lr\": scheduler.get_last_lr()[0]}\n",
    "            )\n",
    "            total_acc += acc\n",
    "\n",
    "        end = time.time()\n",
    "        print(\"耗费时间：\", end - start, \"秒\")\n",
    "        return total_loss / nums, total_acc / nums\n",
    "\n",
    "    if is_train:\n",
    "        model.train()\n",
    "        return run()\n",
    "\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        return run()\n",
    "\n",
    "\n",
    "def train(\n",
    "    model,\n",
    "    optimizer,\n",
    "    scheduler,\n",
    "    wandb,\n",
    "    epochs=config[\"epochs\"],\n",
    "):\n",
    "    \"\"\"Fine-tune for `epochs` epochs, checkpointing whenever train loss improves.\"\"\"\n",
    "    min_loss = float(\"inf\")\n",
    "    for epoch_num in range(epochs):\n",
    "        train_loss, train_acc = run_one_epoch(\n",
    "            model, train_dataloader, True, optimizer, scheduler, wandb\n",
    "        )\n",
    "        print(f\"Epoch: {epoch_num + 1}\")\n",
    "        print(f\"Train loss: {train_loss:.4f}, Train acc: {train_acc:.4f}\")\n",
    "\n",
    "        # Save only when this epoch's mean loss beats the best so far.\n",
    "        if train_loss < min_loss:\n",
    "            min_loss = train_loss\n",
    "            print(epoch_num, \"save model\")\n",
    "            torch.save(model.state_dict(), config[\"new_model\"])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Start a new wandb run to track this fine-tuning script.\n",
    "wandb.init(\n",
    "    # set the wandb project where this run will be logged\n",
    "    project=\"pku_industry\",\n",
    "    # track hyperparameters and run metadata\n",
    "    config={\n",
    "        \"architecture\": \"BERT\",\n",
    "        \"epochs\": config[\"epochs\"],\n",
    "    },\n",
    ")\n",
    "train(model, optimizer, scheduler, wandb)\n",
    "wandb.finish()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
