{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3469faf1-117a-412f-9339-3d223d5e9386",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoTokenizer\n",
    "model_name = './gpt2'\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b272dff-70fb-46f1-8ce5-ab3a5236912c",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "dataset_name = './sst2'\n",
    "dataset = load_dataset(dataset_name)\n",
    "dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7753d1c0-f5af-4e9f-b220-d535b8fcb8ae",
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_train, ds_val = dataset['train'], dataset['validation']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "11d8764a-e703-464b-92b5-c47f1aa2bebb",
   "metadata": {},
   "outputs": [],
   "source": [
    "ds_train[4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb7eade5-3990-4355-92cf-d60cce301c0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "REWARD_TOKEN_ID = tokenizer.eos_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aba0e0e3-f219-426f-b35f-a99f5d3b6d01",
   "metadata": {},
   "outputs": [],
   "source": [
    "REWARD_TOKEN_ID"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c06f63d-daed-4b73-a0ae-88340b5dba7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def tokenize(batch):\n",
    "    \"\"\"Tokenize a batch of examples for reward-model training.\n",
    "\n",
    "    Appends REWARD_TOKEN_ID to every sequence and records:\n",
    "    - 'score': the example's label cast to float (target for BCE loss)\n",
    "    - 'score_index': the position of the appended reward token, where\n",
    "      the model's score will be read out\n",
    "    \"\"\"\n",
    "    outputs = tokenizer(batch['sentence'])\n",
    "    outputs['score'] = [0] * len(outputs['input_ids'])\n",
    "    outputs['score_index'] = [0] * len(outputs['input_ids'])\n",
    "    for i in range(len(outputs['input_ids'])):\n",
    "        # The reward token goes at the very end of each sequence\n",
    "        outputs['input_ids'][i].append(REWARD_TOKEN_ID)\n",
    "        outputs['attention_mask'][i].append(1)\n",
    "        outputs['score'][i] = float(batch['label'][i])\n",
    "        # Index of the reward token (last position after the append)\n",
    "        outputs['score_index'][i] = len(outputs['input_ids'][i]) - 1\n",
    "    return outputs\n",
    "\n",
    "map_kwargs = {\n",
    "    \"batched\": True,\n",
    "    \"batch_size\": 512,\n",
    "    \"remove_columns\": ['idx', 'sentence', 'label']\n",
    "}\n",
    "\n",
    "tokenized_dataset_train = ds_train.map(tokenize, **map_kwargs)\n",
    "tokenized_dataset_val = ds_val.map(tokenize, **map_kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3b8f5041-71b0-455f-8f59-b3fd6d150ef8",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenized_dataset_train[4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7017e30-5503-4ae3-879b-4f875b1a8a5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenized_dataset_train.set_format(type='torch')\n",
    "tokenized_dataset_val.set_format(type='torch')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "94e759ad-3cc3-46b8-adad-0dd97b26318f",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenized_dataset_train[4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e3e1e78-d2ae-447b-bcfc-8a3eb4959301",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenized_dataset_train = tokenized_dataset_train.filter(lambda x: len(x['input_ids']) > 6)\n",
    "tokenized_dataset_val = tokenized_dataset_val.filter(lambda x: len(x['input_ids']) > 6)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "743506a1-1fc8-4cfa-90dd-296820b180c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "len(tokenized_dataset_train)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "29a80228-ce0d-4a7b-a64f-6b084d9b39b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "import numpy as np\n",
    "from transformers import AutoModelForCausalLM\n",
    "\n",
    "class RewardHead(nn.Module):\n",
    "    \"\"\"Linear head that maps an LLM hidden state to a scalar reward.\"\"\"\n",
    "    def __init__(self, config):\n",
    "        super().__init__()\n",
    "        # Width of the LLM's final hidden layer\n",
    "        self.hidden_size = config.hidden_size\n",
    "        # Linear layer that scores the LLM's final hidden states\n",
    "        self.reward = nn.Linear(self.hidden_size, 1)\n",
    "        self._post_init()\n",
    "\n",
    "    def _post_init(self):\n",
    "        # Initialize the weights from a normal distribution\n",
    "        nn.init.normal_(\n",
    "            self.reward.weight,\n",
    "            std=(1.0 / np.sqrt(self.hidden_size + 1))\n",
    "        )\n",
    "        # Initialize the bias to zero\n",
    "        nn.init.zeros_(self.reward.bias)\n",
    "\n",
    "    def forward(self, hidden_states):\n",
    "        # One scalar reward per hidden state\n",
    "        return self.reward(hidden_states)\n",
    "\n",
    "class GPT2RewardHead(nn.Module):\n",
    "    \"\"\"Causal-LM backbone plus a RewardHead; returns per-token scores in (0, 1).\"\"\"\n",
    "    def __init__(self, model_name):\n",
    "        super().__init__()\n",
    "        self.llm = AutoModelForCausalLM.from_pretrained(model_name)\n",
    "        self.reward_head = RewardHead(self.llm.config)\n",
    "\n",
    "    def forward(self, input_ids, attention_mask):\n",
    "        transformer_outputs = self.llm.forward(\n",
    "            input_ids=input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            output_hidden_states=True\n",
    "        )\n",
    "        # Hidden states of the final transformer layer\n",
    "        last_hidden_state = transformer_outputs.hidden_states[-1]\n",
    "        # One scalar reward per token position\n",
    "        reward = self.reward_head(last_hidden_state).squeeze(-1)\n",
    "        # Sigmoid squashes the reward into (0, 1) so it can be used with BCELoss\n",
    "        return torch.sigmoid(reward)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f6250aa-cc8b-4172-848b-5bad125c44fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = GPT2RewardHead(model_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "331cbab7-708b-4fd1-9dd7-6d5bf7d65725",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "from transformers import DataCollatorWithPadding\n",
    "# Use the eos token as the pad token as well\n",
    "tokenizer.pad_token = tokenizer.eos_token\n",
    "\n",
    "# Dynamically pads each batch to its longest sequence\n",
    "data_collator = DataCollatorWithPadding(tokenizer)\n",
    "dataloader_params = {\n",
    "    'batch_size': 32,\n",
    "    'shuffle': True,\n",
    "    'collate_fn': data_collator\n",
    "}\n",
    "train_dataloader = DataLoader(tokenized_dataset_train, **dataloader_params)\n",
    "val_dataloader = DataLoader(tokenized_dataset_val, **dataloader_params)\n",
    "\n",
    "# Grab one batch to inspect in the cells below\n",
    "batch = next(iter(train_dataloader))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "761c5017-9afc-4d28-b3e6-8bbaf669ff67",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch.keys()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb84d736-ee8c-467d-a63d-5206ba0ba051",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch['input_ids'][1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5bd2854f-a109-47bc-8f89-8eb89883b250",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch['attention_mask'][1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38354754-1608-4ac6-b60f-3bac75fad756",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch['score'][1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "859511d4-1fb1-4255-b3a1-37004bae843c",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch['score_index'][1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d161e3c1-088a-4c41-a83b-dadaf7edd3e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "tokenizer.decode(batch['input_ids'][1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21639ee6-17bf-4fdd-931f-a245c78e5ce3",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch['attention_mask'][1].nonzero()[-1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5fd85040-0148-4158-85f6-5bc57dd759f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "outputs = model(batch['input_ids'], batch['attention_mask'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "195a0ede-ec98-4d98-b3ea-2b4411c8fec9",
   "metadata": {},
   "outputs": [],
   "source": [
    "outputs.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "adc86fb2-e3a6-4d9e-a0df-d5416f37f94c",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n",
    "optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)\n",
    "criterion = nn.BCELoss()\n",
    "num_epochs = 1  # single epoch for reward-model training, following \"The N Implementation Details of RLHF with PPO\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "398a7a74-08f3-46f6-9db8-9e8a77f4e54a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def validate():\n",
    "    \"\"\"Evaluate the reward model on the validation set.\n",
    "\n",
    "    Prints the mean BCE loss over all validation batches and also\n",
    "    returns it, so callers can log or compare losses programmatically.\n",
    "    (Previously the value was only printed and lost.)\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    total_loss = 0.0\n",
    "    for batch in val_dataloader:\n",
    "        inputs = batch.to(device)\n",
    "        model_inputs = {\n",
    "            'input_ids': inputs['input_ids'],\n",
    "            'attention_mask': inputs['attention_mask']\n",
    "        }\n",
    "        with torch.no_grad():\n",
    "            # Per-token scores from the reward model\n",
    "            scores = model(**model_inputs)\n",
    "            # Pick the score at each sequence's reward-token position\n",
    "            batch_indices = torch.arange(scores.shape[0])\n",
    "            score = scores[batch_indices, inputs['score_index']]\n",
    "            target = inputs['score']\n",
    "            loss = criterion(score, target)\n",
    "        total_loss += loss.item()\n",
    "    avg_loss = total_loss / len(val_dataloader)\n",
    "    print('validation loss:', avg_loss)\n",
    "    return avg_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95d31d53-0db6-4e1b-b424-58087c8af683",
   "metadata": {},
   "outputs": [],
   "source": [
    "model.to(device)\n",
    "\n",
    "# Baseline validation loss before any training\n",
    "validate()\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()\n",
    "    for i, batch in enumerate(train_dataloader):\n",
    "        inputs = batch.to(device)\n",
    "        model_inputs = {\n",
    "            'input_ids': inputs['input_ids'],\n",
    "            'attention_mask': inputs['attention_mask']\n",
    "        }\n",
    "        # Per-token scores from the reward model\n",
    "        scores = model(**model_inputs)\n",
    "        # Select the score at each sequence's reward-token position\n",
    "        batch_indices = torch.arange(scores.shape[0])\n",
    "        score = scores[batch_indices, inputs['score_index']]\n",
    "        target = inputs['score']\n",
    "        loss = criterion(score, target)\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        print(loss.item())\n",
    "    # Validation loss after each epoch\n",
    "    validate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b52e2cb-5a34-48a3-8702-f7095404ca4c",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model.state_dict(), 'reward_model.pt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "df537605-cc41-47f4-b818-52dcba83f578",
   "metadata": {},
   "outputs": [],
   "source": [
    "validate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3afd1aa2-bb97-4c79-8903-d3b0b9fd3c0b",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import confusion_matrix\n",
    "model.eval()\n",
    "\n",
    "all_predictions = []\n",
    "all_labels = []\n",
    "\n",
    "for i, batch in enumerate(val_dataloader):\n",
    "    inputs = batch.to(device)\n",
    "    model_inputs = {\n",
    "        'input_ids': inputs['input_ids'],\n",
    "        'attention_mask': inputs['attention_mask']\n",
    "    }\n",
    "    with torch.no_grad():\n",
    "        scores = model(**model_inputs)\n",
    "        # Read the score at each sequence's reward-token position\n",
    "        batch_indices = torch.arange(scores.shape[0])\n",
    "        score = scores[batch_indices, inputs['score_index']]\n",
    "        target = inputs['score']\n",
    "    # Threshold the (0, 1) score at 0.5 to obtain a hard 0/1 prediction\n",
    "    predictions = (score > 0.5).int()\n",
    "\n",
    "    all_predictions.extend(predictions.cpu().numpy())\n",
    "    all_labels.extend(target.cpu().numpy())\n",
    "\n",
    "confusion_matrix(all_labels, all_predictions)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f367ead-8f1d-4c3e-bdf8-0db4f7500b07",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
