{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "37ef0c38-85cf-4478-b10e-7dfd9b59f0d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from typing import Optional\n",
    "from torch import nn\n",
    "import numpy as np\n",
    "from transformers import AutoModelForCausalLM\n",
    "\n",
    "class RewardHead(nn.Module):\n",
    "    \"\"\"\n",
    "    RewardHead类给GPT2实现了一个“头”，为每个输出的token返回一个标量值。\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        super().__init__()\n",
    "        self.hidden_size = config.hidden_size\n",
    "        self.reward = nn.Linear(self.hidden_size, 1)\n",
    "        self._post_init()\n",
    "\n",
    "    def _post_init(self):\n",
    "        nn.init.normal_(self.reward.weight, std=(1.0 / np.sqrt(self.hidden_size + 1)))\n",
    "        nn.init.zeros_(self.reward.bias)\n",
    "\n",
    "    def forward(self, hidden_states):\n",
    "        output = hidden_states\n",
    "        return self.reward(output)\n",
    "\n",
    "\n",
    "class GPT2RewardModel(nn.Module):\n",
    "    \"\"\"\n",
    "    GPT2模型加上一个“奖励头”\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model_name):\n",
    "        super().__init__()\n",
    "        self.llm = AutoModelForCausalLM.from_pretrained(model_name)\n",
    "        # 添加奖励头\n",
    "        self.reward_head = RewardHead(self.llm.config)\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids,\n",
    "        attention_mask,\n",
    "    ) -> Optional[torch.FloatTensor]:\n",
    "        # GPT2的输出\n",
    "        transformer_outputs = self.llm.forward(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            output_hidden_states = True,\n",
    "        )\n",
    "\n",
    "        # 获取最后一层隐藏层\n",
    "        last_hidden_state = transformer_outputs.hidden_states[-1]\n",
    "\n",
    "        # 对隐藏层给出奖励\n",
    "        rewards = self.reward_head(last_hidden_state).squeeze(-1)\n",
    "        # 归一化\n",
    "        return torch.sigmoid(rewards)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "5411ac1f-6e3c-4deb-a588-54401c09ab9f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Rebuild the reward model and restore the weights trained earlier.\n",
     "model_name = \"./gpt2\"\n",
     "reward_model = GPT2RewardModel(model_name)\n",
     "# map_location='cpu' so the checkpoint loads even without a GPU.\n",
     "reward_model.load_state_dict(torch.load(\"reward_model.pt\", map_location='cpu'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "5c4cf232-aba9-463e-b127-c4568838e8b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from typing import Optional\n",
    "from torch import nn\n",
    "import numpy as np\n",
    "from transformers import AutoModelForCausalLM\n",
    "\n",
    "class ValueHead(nn.Module):\n",
    "    \"\"\"\n",
    "    ValueHead类为GPT2实现了一个“头”，会为输出的每个token返回一个标量值\n",
    "    标量值就是这个token的价值，ValueHead就是评论家。\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        super().__init__()\n",
    "        self.hidden_size = config.hidden_size\n",
    "        self.value = nn.Linear(self.hidden_size, 1)\n",
    "        self._post_init()\n",
    "\n",
    "    def _post_init(self):\n",
    "        nn.init.normal_(self.value.weight, std=(1.0 / np.sqrt(self.hidden_size + 1)))\n",
    "        nn.init.zeros_(self.value.bias)\n",
    "\n",
    "    def forward(self, hidden_states):\n",
    "        output = hidden_states\n",
    "        return self.value(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "6e36d430-a524-4931-a11b-92216a394872",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ModelForCausalLMWithValueHead(nn.Module):\n",
    "    \"\"\"\n",
    "    GPT2模型+一个价值头\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, model_path):\n",
    "        super().__init__()\n",
    "        # 这个要初始化为我们微调出来的gpt2-sft模型\n",
    "        # actor演员模型\n",
    "        self.llm = AutoModelForCausalLM.from_pretrained(model_path)\n",
    "        # 添加价值头\n",
    "        # critic评论家模型\n",
    "        self.v_head = ValueHead(self.llm.config)\n",
    "\n",
    "    def forward(\n",
    "        self,\n",
    "        input_ids,\n",
    "        attention_mask,\n",
    "    ) -> Optional[torch.FloatTensor]:\n",
    "        # gpt2-sft模型的输出\n",
    "        transformer_outputs = self.llm.forward(\n",
    "            input_ids,\n",
    "            attention_mask=attention_mask,\n",
    "            output_hidden_states = True,\n",
    "        )\n",
    "        # 输出的token\n",
    "        lm_logits = transformer_outputs.logits\n",
    "        # 获取最后一层隐藏层\n",
    "        last_hidden_state = transformer_outputs.hidden_states[-1]\n",
    "\n",
    "        # 评估token的价值\n",
    "        value = self.v_head(last_hidden_state).squeeze(-1)\n",
    "        # 返回输出的token和token的价值\n",
    "        return lm_logits, value\n",
    "\n",
    "    def generate(self, *args, **kwargs):\n",
    "        return self.llm.generate(*args, **kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "3ed599a5-e0ad-4480-9673-798a71c2e32e",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build the actor-critic model from the SFT checkpoint.\n",
     "model_path = './gpt2-sft'\n",
     "model = ModelForCausalLMWithValueHead(model_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d1d50a6e-c299-4906-b10d-f18cfce43462",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "DatasetDict({\n",
      "    train: Dataset({\n",
      "        features: ['idx', 'sentence', 'label'],\n",
      "        num_rows: 67349\n",
      "    })\n",
      "    validation: Dataset({\n",
      "        features: ['idx', 'sentence', 'label'],\n",
      "        num_rows: 872\n",
      "    })\n",
      "    test: Dataset({\n",
      "        features: ['idx', 'sentence', 'label'],\n",
      "        num_rows: 1821\n",
      "    })\n",
      "})\n"
     ]
    }
   ],
   "source": [
     "from transformers import AutoTokenizer\n",
     "# NOTE(review): model_name comes from the reward-model cell above ('./gpt2').\n",
     "tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
     "# GPT-2 has no pad token; reuse EOS for padding.\n",
     "tokenizer.pad_token = tokenizer.eos_token\n",
     "\n",
     "from datasets import load_dataset\n",
     "# SST-2 sentiment dataset, loaded from a local copy.\n",
     "dataset = load_dataset(\"./sst2\")\n",
     "print(dataset)\n",
     "\n",
     "ds_train, ds_val = dataset['train'], dataset['validation']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "aca568ee-75be-4804-a8a0-889e8408a93d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "67349\n",
      "31105\n",
      "807\n"
     ]
    }
   ],
   "source": [
    "print(len(ds_train))\n",
    "ds_train = ds_train.filter(lambda x: len(x['sentence'].split(' ')) > 8)\n",
    "ds_val = ds_val.filter(lambda x: len(x['sentence'].split(' ')) > 8)\n",
    "\n",
    "print(len(ds_train))\n",
    "print(len(ds_val))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "715c7a4f-e97c-415d-93f1-5163bc796c63",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2, 3, 4, 5, 6, 7]\n",
      "6\n"
     ]
    }
   ],
   "source": [
     "import random\n",
     "# Query prompts will be truncated to a random length in [2, 8) tokens;\n",
     "# note that range() excludes input_max_token_length itself.\n",
     "input_min_token_length = 2\n",
     "input_max_token_length = 8\n",
     "input_token_length_range = list(range(input_min_token_length, input_max_token_length))\n",
     "print(input_token_length_range)\n",
     "print(random.choice(input_token_length_range))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "f49d1079-fb19-48f8-9bf0-0e1da7982fec",
   "metadata": {},
   "outputs": [],
   "source": [
    "def tokenize(sample):\n",
    "    input_size = random.choice(input_token_length_range)\n",
    "    sample['input_ids'] = tokenizer.encode(sample['sentence'])[:input_size]\n",
    "    sample['attention_mask'] = [1] * len(sample['input_ids'])\n",
    "    sample['query'] = tokenizer.decode(sample['input_ids'])\n",
    "    return sample\n",
    "\n",
    "map_kwargs = {\n",
    "    \"batched\": False,\n",
    "    \"remove_columns\": ['idx', 'sentence', 'label']\n",
    "}\n",
    "\n",
    "tokenized_dataset_train = ds_train.map(tokenize, **map_kwargs)\n",
    "tokenized_dataset_val = ds_val.map(tokenize, **map_kwargs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "23802716-aeb1-4b36-8317-3604c0bd6de3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([1640,  883, 3807]), 'attention_mask': tensor([1, 1, 1]), 'query': 'for those movie'}\n"
     ]
    }
   ],
   "source": [
     "# Return tensors (instead of Python lists) when indexing the datasets.\n",
     "tokenized_dataset_train.set_format(type='torch')\n",
     "tokenized_dataset_val.set_format(type='torch')\n",
     "\n",
     "print(tokenized_dataset_train[6])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "d2b3f4b4-37de-4869-b713-e3e375a0884a",
   "metadata": {},
   "outputs": [],
   "source": [
     "# EOS id appended to each sequence before scoring; the reward-model score\n",
     "# is read off at this final position (see the scoring cells below).\n",
     "REWARD_TOKEN_ID = tokenizer.eos_token_id"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "f2a12740-3f27-4666-936d-fa203e91c680",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': [tensor([ 272,  625,  301, 2645, 1143,  837, 1308]), tensor([ 1350, 13504,   262,  2126,   286,  1521]), tensor([18108,   645,  5876]), tensor([ 11, 644, 705,  82]), tensor([ 4480,   663,  2426,  2300,   287,   257, 14854]), tensor([   64, 12625,  4065]), tensor([5661,  318]), tensor([  732,   705,   260, 12908,   510,   287]), tensor([11545,  5895,   326,   285,    13]), tensor([18820,  1327,   284,   307,  8258,   287]), tensor([1101, 1654,  612,  705,   82,  257]), tensor([  338,   355, 22066]), tensor([   7,  479, 1370]), tensor([  11, 4437,  705,   82]), tensor([ 4491,   933,   869,   283, 19623, 31432,   710]), tensor([  805,  1095,  1239,   284,  1663, 14262]), tensor([ 1078,  1791,   284,  2222, 44182]), tensor([   11, 14187, 22874,   837]), tensor([ 1659,   262,  1266, 34549]), tensor([13116,   326,   340]), tensor([ 8873,   560, 10721]), tensor([ 1169,  5884,  3923,   286,  1449, 17868,   290]), tensor([13959,  1613,  2636,   318,   588]), tensor([23108,   286,   262,  8469,   293,   286,  7559]), tensor([  70, 5500,  290, 7165]), tensor([1659, 1099]), tensor([47436,  2714, 23229]), tensor([ 272, 3499, 7243,  837]), tensor([   64,   288,   808,  1837, 10512,  1167,   265]), tensor([ 1169,  2646, 22324,  1146,  3544,   262,  3052]), tensor([8988,  832,  477, 1165]), tensor([30176,   837,  4451,   837, 32800,   837, 13206])], 'attention_mask': [tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1]), tensor([1, 1, 1]), tensor([1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 1]), tensor([1, 1]), tensor([1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1]), tensor([1, 1, 1]), tensor([1, 1, 1]), tensor([1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1]), tensor([1, 1, 1, 1]), tensor([1, 1, 1, 1]), tensor([1, 1, 1]), tensor([1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 
1, 1]), tensor([1, 1]), tensor([1, 1, 1]), tensor([1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1]), tensor([1, 1, 1, 1]), tensor([1, 1, 1, 1, 1, 1, 1])], 'query': ['an overstylized , pur', 'be exploring the idea of why', 'had no trouble', \", what 's\", 'with its subject matter in a tast', 'a delicious crime', 'this is', \"we 're wrapped up in\", 'two signs that m.', 'too hard to be funny in', \"'m sure there 's a\", \"'s as rude\", '( kline', \", spirit 's\", 'morvern callar confirms lynne', 'manages never to grow boring', 'attempt to bring cohesion', ', paxton ,', 'of the best ensemble', 'report that it', 'hoary dialogue', 'the connected stories of breitbart and', 'half past dead is like', 'none of the crackle of ``', 'guts and crazy', 'of law', 'been held hostage', 'an interesting topic ,', 'a drowsy drama infat', 'the film idiotically uses the website', 'comes through all too', 'sensitive , smart , savvy , compelling']}\n"
     ]
    }
   ],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "\n",
    "batch_size = 32\n",
    "\n",
    "def collator(batch):\n",
    "    return dict((key, [d[key] for d in batch]) for key in batch[0])\n",
    "\n",
    "train_dataloader = DataLoader(tokenized_dataset_train, batch_size=batch_size, collate_fn=collator, shuffle=True)\n",
    "val_dataloader = DataLoader(tokenized_dataset_val, batch_size=batch_size, collate_fn=collator, shuffle=True)\n",
    "\n",
    "batch = next(iter(train_dataloader))\n",
    "print(batch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "dd98842f-ce02-46ea-847f-f89170541c34",
   "metadata": {},
   "outputs": [],
   "source": [
     "output_min_length = 5\n",
     "output_max_length = 16\n",
     "\n",
     "# https://huggingface.co/docs/trl/how_to_train#how-to-generate-text-for-training\n",
     "# Sampling config for the SFT policy: pure ancestral sampling\n",
     "# (top_k disabled, top_p=1.0); min_length=-1 disables the length floor.\n",
     "generation_kwargs = {\n",
     "    \"min_length\": -1,\n",
     "    \"top_k\": 0.0,  # NOTE(review): transformers expects an int; 0 would be cleaner\n",
     "    \"top_p\": 1.0,\n",
     "    \"do_sample\": True,\n",
     "    \"pad_token_id\": tokenizer.pad_token_id\n",
     "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "e2c1d298-9a0c-4569-99c4-7075dcd1b618",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': [17250, 11, 428], 'attention_mask': [1, 1, 1]}\n"
     ]
    }
   ],
   "source": [
     "# Pick a random response length in [5, 16) and build a single trial prompt.\n",
     "new_tokens = random.choice(list(range(output_min_length, output_max_length)))\n",
     "generation_kwargs[\"max_new_tokens\"] = new_tokens\n",
     "sample = tokenizer('Hi, this')\n",
     "print(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "b7b337c0-3871-440c-89f0-defa1fd699e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([17250,    11,   428,  9439,   318,  1016,   284,   307])\n"
     ]
    }
   ],
   "source": [
     "# Generate a continuation for the trial prompt; generate() returns the\n",
     "# query + response token ids as one sequence (batch dim squeezed away).\n",
     "query_response = model.generate(\n",
     "    input_ids=torch.tensor(sample['input_ids']).unsqueeze(0),\n",
     "    attention_mask=torch.tensor(sample['attention_mask']).unsqueeze(0),\n",
     "    **generation_kwargs\n",
     ").squeeze(0)\n",
     "print(query_response)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "23dc76ab-a463-4752-92f2-68803eebd421",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Hi, this tomorrow is going to be\n"
     ]
    }
   ],
   "source": [
     "# Human-readable view of the prompt plus the generated continuation.\n",
     "print(tokenizer.decode(query_response))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "97d15ccb-23ea-499b-befb-5126ae494321",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.8867)\n"
     ]
    }
   ],
   "source": [
     "with torch.no_grad():\n",
     "    # Append the reward token so the reward model scores the full sequence.\n",
     "    query_response_score = torch.cat([query_response, torch.tensor([REWARD_TOKEN_ID])])\n",
     "    attention_mask = torch.ones_like(query_response_score, dtype=torch.long)\n",
     "    # The per-token reward at the final (appended) position is the sequence score.\n",
     "    score = reward_model(\n",
     "        query_response_score.unsqueeze(0),\n",
     "        attention_mask.unsqueeze(0)\n",
     "    ).squeeze(0)[-1]\n",
     "print(score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "4ac4d22f-6f36-495e-817a-850a86bd5f2e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['dy somewhat comically twisted version of violence that should',\n",
      " ' do bad films change movies more frequently when',\n",
      " ' believing that pure greek astrology',\n",
      " ' missing from this movie is the genuine excitement .',\n",
      " 'eful and delicately crafted fashion . ',\n",
      " ' comedy that is icky',\n",
      " ' a surprisingly fair game for fans of the spy and at least',\n",
      " ' this sweet , dark , very romantic comedy iced by ace romero',\n",
      " ' m. macdonald lives a well-',\n",
      " ' this genre         ',\n",
      " ' good chance somebody else might be playing',\n",
      " ' as you can get      ',\n",
      " ' ) lacks formal knowledge and is too earnest',\n",
      " ' assignations to perspicacious documentary work      would have',\n",
      " ' williams as a',\n",
      " ' .             ',\n",
      " ' between the acclaimed and beloved cast ',\n",
      " ' jordan and mattel executives make great front seats , but this film',\n",
      " ' cast ia ia ia ia ia .',\n",
      " ' was modeled on real-life events ?\"',\n",
      " \" , raunchy violence , and iani 's happy ending\",\n",
      " ' arnold ershowitz ',\n",
      " ' mediocre entertainment .          ',\n",
      " \" under bogs '' the riot got even fatter -- in fact ,\",\n",
      " ' jokes             ',\n",
      " ' enforcement is a thoughtful , unment',\n",
      " ' for nearly three months by an unknown one    ',\n",
      " ' but not in an engaging way     ',\n",
      " 'uated by haphazard , infantilized theatre-',\n",
      " ' for humor ianw ii ',\n",
      " \" often as a belly-laugh .     ''  REUTERS \",\n",
      " ' and ick family  ersatz    ══ ']\n"
     ]
    }
   ],
   "source": [
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "model = model.to(device)\n",
     "reward_model = reward_model.to(device)\n",
     "\n",
     "query_tensors = batch['input_ids']\n",
     "query_attention_masks = batch['attention_mask']\n",
     "\n",
     "# Rollout buffers: responses, full query+response sequences, and scores.\n",
     "response_tensors = []\n",
     "query_response_tensors = []\n",
     "score_tensors = []\n",
     "\n",
     "for i, query in enumerate(query_tensors):\n",
     "    query = query.to(device)\n",
     "    query_attention_mask = query_attention_masks[i].to(device)\n",
     "    # Random response length in [output_min_length, output_max_length).\n",
     "    new_tokens = random.choice(list(range(output_min_length, output_max_length)))\n",
     "    generation_kwargs[\"max_new_tokens\"] = new_tokens\n",
     "    # Sample a continuation from the actor; result is query + response ids.\n",
     "    query_response = model.generate(\n",
     "        input_ids=query.unsqueeze(0),\n",
     "        attention_mask=query_attention_mask.unsqueeze(0),\n",
     "        **generation_kwargs\n",
     "    ).squeeze(0)\n",
     "\n",
     "    # Split off the generated part (everything after the query tokens).\n",
     "    response_len = len(query_response) - len(query)\n",
     "    response_tensors.append(query_response[-response_len:])\n",
     "    query_response_tensors.append(query_response)\n",
     "\n",
     "    with torch.no_grad():\n",
     "        # Append the reward token and read the score at that final position.\n",
     "        query_response_score = torch.cat([query_response, torch.tensor([REWARD_TOKEN_ID]).to(device)])\n",
     "        attention_mask = torch.ones_like(query_response_score, dtype=torch.long)\n",
     "        score = reward_model(\n",
     "            query_response_score.unsqueeze(0),\n",
     "            attention_mask.unsqueeze(0)\n",
     "        ).squeeze(0)[-1]\n",
     "        # Rescale the sigmoid output from (0, 1) to (-1, 1).\n",
     "        score = 2 * (score - 0.5)\n",
     "    score_tensors.append(score)\n",
     "\n",
     "batch[\"response\"] = [tokenizer.decode(response) for response in response_tensors]\n",
     "from pprint import pprint\n",
     "pprint(batch['response'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "cf419090-0293-4fba-962a-75c9bd1a74c0",
   "metadata": {},
   "outputs": [],
   "source": [
     "from copy import deepcopy\n",
     "# Frozen reference copy of the current policy, used for the KL penalty.\n",
     "sft_model = deepcopy(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "f42e9f3d-689b-4170-b358-aac96204949e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'input_ids': tensor([[  272,   625,   301,  2645,  1143,   837,  1308,  9892,  6454,   401,\n",
      "          1146, 19074,  2196,   286,  3685,   326,   815, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 1350, 13504,   262,  2126,   286,  1521,   466,  2089,  7328,  1487,\n",
      "          6918,   517,  6777,   618, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [18108,   645,  5876, 14773,   326,  5899,   308, 10316,  6468, 31142,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [   11,   644,   705,    82,  4814,   422,   428,  3807,   318,   262,\n",
      "          8768, 14067,   764, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 4480,   663,  2426,  2300,   287,   257, 14854, 13839,   290,  8675,\n",
      "          1286, 18025,  6977,   764,   220, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [   64, 12625,  4065, 10997,   326,   318,   220, 17479, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 5661,   318,   257, 12362,  3148,   983,   329,  3296,   286,   262,\n",
      "         13997,   290,   379,  1551, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [  732,   705,   260, 12908,   510,   287,   428,  6029,   837,  3223,\n",
      "           837,   845, 14348, 10997,   220,  3711,   416, 31506,  9267,  3529,\n",
      "         50256],\n",
      "        [11545,  5895,   326,   285,    13,   285,    13,  8352, 40915,  3160,\n",
      "           257,   880,    12, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [18820,  1327,   284,   307,  8258,   287,   428, 12121,   220,   220,\n",
      "           220,   220,   220,   220,   220,   220,   220, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 1101,  1654,   612,   705,    82,   257,   922,  2863,  8276,  2073,\n",
      "          1244,   307,  2712, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [  338,   355, 22066,   355,   345,   460,   651,   220,   220,   220,\n",
      "           220,   220,   220, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [    7,   479,  1370,  1267, 16523,  8766,  3725,   290,   318,  1165,\n",
      "         23176, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [   11,  4437,   705,    82,  8333,   602,   284,  2774, 16564, 14209,\n",
      "         11648,   670,   220,   220,   220,   220,   220,   561,   423, 50256,\n",
      "         50256],\n",
      "        [ 4491,   933,   869,   283, 19623, 31432,   710,   481,  1789,    82,\n",
      "           355,   257, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [  805,  1095,  1239,   284,  1663, 14262,   764,   220,   220,   220,\n",
      "           220,   220,   220,   220,   220,   220,   220,   220,   220,   220,\n",
      "         50256],\n",
      "        [ 1078,  1791,   284,  2222, 44182,  1022,   262, 27023,   290, 14142,\n",
      "          3350,   220, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [   11, 14187, 22874,   837,   474,  7350,   290, 23963,   417, 12353,\n",
      "           787,  1049,  2166,  8632,   837,   475,   428,  2646, 50256, 50256,\n",
      "         50256],\n",
      "        [ 1659,   262,  1266, 34549,  3350,   220,   544,   220,   544,   220,\n",
      "           544,   220,   544,   220,   544,   764, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [13116,   326,   340,   373, 29563,   319,  1103,    12,  6042,  2995,\n",
      "           220,  1701, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 8873,   560, 10721,   837,   374, 11429,    88,  3685,   837,   290,\n",
      "           220, 25111,   705,    82,  3772,  7464, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 1169,  5884,  3923,   286,  1449, 17868,   290,   610,    77,   727,\n",
      "           220,   364,  4919,  4224,   220, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [13959,  1613,  2636,   318,   588, 29956,  9739,   764,   220,   220,\n",
      "           220,   220,   220,   220,   220,   220,   220,   220, 50256, 50256,\n",
      "         50256],\n",
      "        [23108,   286,   262,  8469,   293,   286,  7559,   739,   275, 18463,\n",
      "         10148,   262, 16352,  1392,   772,   277,  1436,  1377,   287,  1109,\n",
      "           837],\n",
      "        [   70,  5500,   290,  7165, 14532,   220,   220,   220,   220,   220,\n",
      "           220,   220,   220,   220,   220,   220,   220,   220, 50256, 50256,\n",
      "         50256],\n",
      "        [ 1659,  1099,  5394,   318,   257, 22677,   837,   555,   434, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [47436,  2714, 23229,   329,  3016,  1115,  1933,   416,   281,  6439,\n",
      "           530,   220,   220,   220,   220, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [  272,  3499,  7243,   837,   475,   407,   287,   281, 11932,   835,\n",
      "           220,   220,   220,   220,   220, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [   64,   288,   808,  1837, 10512,  1167,   265,  6605,   416,   387,\n",
      "           746, 26267,   837, 11212,   346,  1143, 21421,    12, 50256, 50256,\n",
      "         50256],\n",
      "        [ 1169,  2646, 22324,  1146,  3544,   262,  3052,   329, 14733,   220,\n",
      "           666,    86, 21065,   220, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256],\n",
      "        [ 8988,   832,   477,  1165,  1690,   355,   257, 19921,    12, 44944,\n",
      "           764,   220,   220,   220,   220, 10148,   220, 15862,   220, 50256,\n",
      "         50256],\n",
      "        [30176,   837,  4451,   837, 32800,   837, 13206,   290,   220,   624,\n",
      "          1641,   220,   220,   364, 27906,   220,   220,   220,   220, 31732,\n",
      "           220]], device='cuda:0'), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],\n",
      "        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],\n",
      "       device='cuda:0')}\n"
     ]
    }
   ],
   "source": [
     "from transformers import DataCollatorWithPadding\n",
     "# Pads input_ids with the pad (EOS) token and attention_mask with zeros,\n",
     "# producing rectangular (batch, seq) tensors.\n",
     "data_collator = DataCollatorWithPadding(tokenizer=tokenizer)\n",
     "\n",
     "input_data = data_collator([\n",
     "    {'input_ids': ids,\n",
     "     'attention_mask': torch.ones_like(ids)} for ids in query_response_tensors\n",
     "]).to(device)\n",
     "print(input_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "id": "f8a976f4-a058-4230-b326-83c4376aab7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def compute_rewards(input_data, query_tensors, response_tensors, score_tensors):\n",
    "    with torch.no_grad():\n",
    "        # 正在微调的模型所输出的token的logits和token的价值\n",
    "        # 模型输出所有token的概率分布\n",
    "        logits, values = model(**input_data) # b, seq, vocab\n",
    "        # 冻结的模型的输出和价值\n",
    "        ref_logits, _ = sft_model(**input_data)\n",
    "        # 正在微调的模型的输出的对数概率\n",
    "        logp = torch.nn.functional.log_softmax(logits[:, :-1, :], dim=-1)\n",
    "        # 冻结的模型的输出的对数概率\n",
    "        ref_logp = torch.nn.functional.log_softmax(ref_logits[:, :-1, :], dim=-1)\n",
    "        # 实际生成的token序列\n",
    "        labels = input_data['input_ids'][:, 1:] # b, seq\n",
    "        # 使用gather提取实际token的概率\n",
    "        logp = torch.gather(logp, 2, labels.unsqueeze(-1)).squeeze(-1) # batch, seq\n",
    "        ref_logp = torch.gather(ref_logp, 2, labels.unsqueeze(-1)).squeeze(-1) # batch, seq\n",
    "        # kl散度\n",
    "        kl = logp - ref_logp\n",
    "        # kl散度的权重\n",
    "        beta = 0.2\n",
    "        # 最终奖励的计算\n",
    "        rewards = - beta * kl\n",
    "        attention_mask = input_data['attention_mask']\n",
    "        masks = torch.zeros_like(attention_mask[:, 1:])\n",
    "        masks[:,:] = attention_mask[:, 1:]\n",
    "        flag = False\n",
    "        for j in range(len(query_tensors)):\n",
    "            start = len(query_tensors[j]) - 1\n",
    "            end = start + len(response_tensors[j])\n",
    "            masks[j, :start] = 0\n",
    "            masks[j, end:] = 0\n",
    "            print(rewards[j])\n",
    "            rewards[j, end - 1] += score_tensors[j]\n",
    "            print(rewards[j])\n",
    "            rewards[j, :] *= masks[j, :]\n",
    "            values[j, :-1] *= masks[j, :]\n",
    "            if not flag:\n",
    "                print(tokenizer.decode(input_data['input_ids'][j]))\n",
    "                print(tokenizer.decode(input_data['input_ids'][j][:-1]))\n",
    "                print(tokenizer.decode(input_data['input_ids'][j][1:]))\n",
    "                print(tokenizer.decode(input_data['input_ids'][j][start:end+1]))\n",
    "                print(tokenizer.decode(input_data['input_ids'][j][1:][end-1]))\n",
    "                print('start: ', start)\n",
    "                print('end: ', end)\n",
    "                flag = True\n",
    "\n",
    "    return logp, rewards, values[:, :-1], masks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "20eba7a4-afaf-4e8a-8f69-e703c4cd0161",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.9143,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "an overstylized , purdy somewhat comically twisted version of violence that should<|endoftext|><|endoftext|><|endoftext|><|endoftext|>\n",
      "an overstylized , purdy somewhat comically twisted version of violence that should<|endoftext|><|endoftext|><|endoftext|>\n",
      " overstylized , purdy somewhat comically twisted version of violence that should<|endoftext|><|endoftext|><|endoftext|><|endoftext|>\n",
      " purdy somewhat comically twisted version of violence that should\n",
      " should\n",
      "start:  6\n",
      "end:  16\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.6296, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.9828,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.8850, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, 0.9989, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.9869, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, 0.9901, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        0.9956, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, 0.9501, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.8645,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, 0.3464, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.9848, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.9490, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.8379,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, 0.9305, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        0.9556, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, 0.6918, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.8488, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.9966, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, 0.2068, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.7516, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, 0.8375, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.9887, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.9577], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.5829, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, 0.9941, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.8869, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, 0.1363, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.9565, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.9860, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.3436, -0.0000, -0.0000], device='cuda:0')\n",
      "tensor([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0., -0.],\n",
      "       device='cuda:0')\n",
      "tensor([-0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000, -0.0000,\n",
      "        -0.0000, 0.9972], device='cuda:0')\n"
     ]
    }
   ],
   "source": [
    "logprobs, rewards, values, masks = compute_rewards(input_data, query_tensors, response_tensors, score_tensors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6ae21ab4-c5c9-4155-970c-c84083f007bf",
   "metadata": {},
   "outputs": [],
   "source": [
    "def masked_mean(values, mask):\n",
    "    return (values * mask).sum() / mask.sum()\n",
    "\n",
    "def masked_var(values, mask):\n",
    "    mean = masked_mean(values, mask)\n",
    "    centred_values = values - mean\n",
    "    return masked_mean(centred_values ** 2, mask)\n",
    "\n",
    "def masked_whiten(values, mask):\n",
    "    mean, var = masked_mean(values, mask), masked_var(values, mask)\n",
    "    whitened = (values - mean) * torch.rsqrt(var + 1e-8)\n",
    "    whitened += mean\n",
    "    return whitened\n",
    "\n",
    "def compute_advantage(rewards, values, masks):\n",
    "    lastgae = 0.0\n",
    "    advantage_reversed = []\n",
    "    seq_length = rewards.shape[-1]\n",
    "    gamma, lam = 1.0, 0.95\n",
    "\n",
    "    for t in reversed(range(seq_length)):\n",
    "        nextvalues = values[:, t + 1] if t < seq_length - 1 else 0.0\n",
    "        delta = rewards[:, t] + gamma * nextvalues - values[:, t]\n",
    "        lastgae = delta + gamma * lam * lastgae\n",
    "        advantage_reversed.append(lastgae)\n",
    "    advantages = torch.stack(advantage_reversed[::-1], dim=1)\n",
    "    # 归一化一下\n",
    "    advantages = masked_whiten(advantages, masks)\n",
    "\n",
    "    returns = advantages + values\n",
    "    return advantages, returns"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a65766e-1f12-4731-8308-53371948cd2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "advantages, returns = compute_advantage(rewards, values, masks)\n",
    "print(advantages[0])\n",
    "print(returns[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b1c8421a-ff5d-49f1-bf41-71e987e2f9b9",
   "metadata": {},
   "outputs": [],
   "source": [
    "learning_rate = 1e-5\n",
    "optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n",
    "# 重新排列一下各个批次\n",
    "np.random.permutation(batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "94f26100-9281-4a16-8512-1447c38c55d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "mini_batch_size = 4\n",
    "ppo_epochs = 4\n",
    "\n",
    "cliprange_ratio = 0.2\n",
    "\n",
    "v_loss_coeff = 0.1\n",
    "\n",
    "ratio_threshold = 10\n",
    "\n",
    "def compute_loss(\n",
    "    old_logprobs,\n",
    "    values,\n",
    "    logprobs,\n",
    "    vpreds,\n",
    "    masks,\n",
    "    advantages,\n",
    "    returns\n",
    "):\n",
    "    ratio = torch.exp(logprobs - old_logprobs)\n",
    "    pg_loss1 = - ratio * advantages\n",
    "    pg_loss2 = - torch.clamp(\n",
    "        ratio,\n",
    "        1 - cliprange_ratio,\n",
    "        1 + cliprange_ratio\n",
    "    ) * advantages\n",
    "    pg_loss = masked_mean(torch.max(pg_loss1, pg_loss2), masks)\n",
    "\n",
    "    v_loss = masked_mean((vpreds - returns) ** 2, masks)\n",
    "    loss = pg_loss + v_loss_coeff * v_loss\n",
    "\n",
    "    avg_ratio = masked_mean(ratio, masks)\n",
    "    if avg_ratio > ratio_threshold:\n",
    "        pg_loss = pg_loss * 0.0\n",
    "        v_loss = v_loss * 0.0\n",
    "        loss = loss * 0.0\n",
    "\n",
    "    return loss, v_loss\n",
    "\n",
    "def mini_batch_train():\n",
    "    # 过滤掉输入数据为空的批次\n",
    "    if input_data['input_ids'].shape[0] == 0:\n",
    "        return\n",
    "    for ep in range(ppo_epochs):\n",
    "        batch_inds = np.random.permutation(batch_size)\n",
    "\n",
    "        for start in range(0, batch_size, mini_batch_size):\n",
    "            end = start + mini_batch_size\n",
    "            mini_batch_inds = batch_inds[start:end]\n",
    "\n",
    "            mb_model_inputs = {\n",
    "                'input_ids': input_data['input_ids'][mini_batch_inds],\n",
    "                'attention_mask': input_data['attention_mask'][mini_batch_inds]\n",
    "            }\n",
    "            mb_logits, mb_vpreds = model(**mb_model_inputs)\n",
    "            mb_logits = torch.nn.functional.log_softmax(\n",
    "                mb_logits[:, :-1, :],\n",
    "                dim=-1\n",
    "            )\n",
    "            mb_logprobs = torch.gather(\n",
    "                mb_logits,\n",
    "                2,\n",
    "                mb_model_inputs['input_ids'][:, 1:].unsqueeze(-1)\n",
    "            ).squeeze(-1)\n",
    "\n",
    "            loss, loss_v = compute_loss(\n",
    "                logprobs[mini_batch_inds],\n",
    "                values[mini_batch_inds],\n",
    "                mb_logprobs,\n",
    "                mb_vpreds[:, :-1],\n",
    "                masks[mini_batch_inds],\n",
    "                advantages[mini_batch_inds],\n",
    "                returns[mini_batch_inds]\n",
    "            )\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            print('loss/total', loss.item())\n",
    "    print('mini-batch training finished')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ff42d752-7d11-4994-a209-b68648a8a3ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "mini_batch_train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9a11baa-deab-46f1-b18f-6d44d028698c",
   "metadata": {},
   "outputs": [],
   "source": [
    "num_epochs = 1\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    for batch in train_dataloader:\n",
    "        # Generate responses\n",
    "        query_tensors = batch['input_ids']\n",
    "        query_attention_masks = batch['attention_mask']\n",
    "\n",
    "        response_tensors = []\n",
    "        query_response_tensors = []\n",
    "        score_tensors = []\n",
    "\n",
    "        for i, query in enumerate(query_tensors):\n",
    "            query = query.to(device)\n",
    "            query_attention_mask = query_attention_masks[i].to(device)\n",
    "            new_tokens = random.choice(list(range(\n",
    "                output_min_length,\n",
    "                output_max_length)))\n",
    "            generation_kwargs[\"max_new_tokens\"] = new_tokens\n",
    "            query_response = model.generate(\n",
    "                input_ids=query.unsqueeze(0),\n",
    "                attention_mask=query_attention_mask.unsqueeze(0),\n",
    "                **generation_kwargs\n",
    "            ).squeeze(0)\n",
    "\n",
    "            response_len = len(query_response) - len(query)\n",
    "            response_tensors.append(query_response[-response_len:])\n",
    "            query_response_tensors.append(query_response)\n",
    "\n",
    "            with torch.no_grad():\n",
    "                query_response_score = torch.cat([\n",
    "                    query_response,\n",
    "                    torch.tensor([REWARD_TOKEN_ID]).to(device)])\n",
    "                attention_mask = torch.ones_like(\n",
    "                    query_response_score,\n",
    "                    dtype=torch.long)\n",
    "                score = reward_model(\n",
    "                    query_response_score.unsqueeze(0),\n",
    "                    attention_mask.unsqueeze(0)\n",
    "                ).squeeze(0)[-1]\n",
    "                score = 2 * (score - 0.5)\n",
    "            score_tensors.append(score)\n",
    "\n",
    "        input_data = data_collator([\n",
    "            {\n",
    "                'input_ids': ids,\n",
    "                'attention_mask': torch.ones_like(ids)\n",
    "            }\n",
    "            for ids in query_response_tensors\n",
    "        ]).to(device)\n",
    "\n",
    "        # 奖励和优势\n",
    "        logprobs, rewards, values, masks = compute_rewards(\n",
    "            input_data,\n",
    "            query_tensors,\n",
    "            response_tensors,\n",
    "            score_tensors\n",
    "        )\n",
    "        advantages, returns = compute_advantage(rewards, values, masks)\n",
    "\n",
    "        # 小批次训练\n",
    "        mini_batch_train()\n",
    "    print(f'epoch {epoch + 1} finished')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "84194dcc-2b3a-4ec2-a6f3-b5c7f5192998",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(len(tokenized_dataset_val))\n",
    "val_gen_lengths = [0] * len(tokenized_dataset_val)\n",
    "for i in range(len(tokenized_dataset_val)):\n",
    "    val_gen_lengths[i] = random.choice(list(range(\n",
    "        output_min_length,\n",
    "        output_max_length)))\n",
    "val_gen_lengths[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "941c8215-dbb6-4233-9730-219c7070cdb7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def validate():\n",
    "    scores = []\n",
    "    for b, batch in enumerate(val_dataloader):\n",
    "        # Generate_responses\n",
    "        query_tensors = batch['input_ids']\n",
    "        query_attention_masks = batch['attention_mask']\n",
    "        for i, query in enumerate(query_tensors):\n",
    "            query = query.to(device)\n",
    "            query_attention_mask = query_attention_masks[i].to(device)\n",
    "            new_tokens = val_gen_lengths[b * len(query_tensors) + i]\n",
    "            generation_kwargs[\"max_new_tokens\"] = new_tokens\n",
    "            query_response = model.generate(\n",
    "                input_ids=query.unsqueeze(0),\n",
    "                attention_mask=query_attention_mask.unsqueeze(0),\n",
    "                **generation_kwargs\n",
    "            ).squeeze(0)\n",
    "            query_response_score = torch.cat([\n",
    "                query_response,\n",
    "                torch.tensor([REWARD_TOKEN_ID]).to(device)])\n",
    "            attention_mask = torch.ones_like(\n",
    "                query_response_score, dtype=torch.long)\n",
    "            score = reward_model(\n",
    "                query_response_score.unsqueeze(0),\n",
    "                attention_mask.unsqueeze(0)\n",
    "            ).squeeze(0)[-1]\n",
    "            score = 2 * (score - 0.5)\n",
    "            scores.append(score.item())\n",
    "    print('平均分数:', sum(scores) / len(scores))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "54bab540-e803-4685-bf83-a13c35473d68",
   "metadata": {},
   "outputs": [],
   "source": [
    "validate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ec434ef-d027-4690-9e34-28e5c5ea2202",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model.state_dict(), 'gpt2-ppo.pt')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c66fc3a1-0ce4-4a6f-8553-6d9a80cc4945",
   "metadata": {},
   "outputs": [],
   "source": [
    "model_path = './gpt2-sft'\n",
    "model = ModelForCausalLMWithValueHead(model_path).to(device)\n",
    "validate()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67e53c1f-7afe-4885-aad6-60420ecc048a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import pipeline, set_seed\n",
    "from pprint import pprint\n",
    "g = pipeline('text-generation', model='./gpt2-ppo-without-vhead')\n",
    "set_seed(1337)\n",
    "pprint(g(\"Hi, this is all terribly\", max_length=30, num_return_sequences=1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "0733b1d6-8899-4611-a3d7-f2eabc06eb01",
   "metadata": {},
   "outputs": [],
   "source": [
    "def quick_model_comparison(model1, model2, threshold=1e-6):\n",
    "    \"\"\"快速模型比较\"\"\"\n",
    "    \n",
    "    print(\"⚡ 快速模型比较\")\n",
    "    print(\"=\"*40)\n",
    "    \n",
    "    params1 = dict(model1.named_parameters())\n",
    "    params2 = dict(model2.named_parameters())\n",
    "    common_params = set(params1.keys()) & set(params2.keys())\n",
    "    \n",
    "    identical_count = 0\n",
    "    different_count = 0\n",
    "    significant_diff_count = 0\n",
    "    \n",
    "    for name in common_params:\n",
    "        p1, p2 = params1[name], params2[name]\n",
    "        \n",
    "        if p1.shape != p2.shape:\n",
    "            continue\n",
    "            \n",
    "        diff = torch.max(torch.abs(p1 - p2)).item()\n",
    "        \n",
    "        if diff < 1e-10:\n",
    "            identical_count += 1\n",
    "        elif diff < threshold:\n",
    "            different_count += 1\n",
    "        else:\n",
    "            significant_diff_count += 1\n",
    "            if significant_diff_count <= 5:  # 只显示前5个\n",
    "                print(f\"🔴 显著差异: {name} (最大差异: {diff:.8f})\")\n",
    "    \n",
    "    print(f\"\\n📊 结果统计:\")\n",
    "    print(f\"   完全相同的层: {identical_count}\")\n",
    "    print(f\"   微小差异的层: {different_count}\")\n",
    "    print(f\"   显著差异的层: {significant_diff_count}\")\n",
    "    \n",
    "    if identical_count == len(common_params):\n",
    "        print(\"✅ 两个模型参数完全相同！\")\n",
    "    elif significant_diff_count == 0:\n",
    "        print(\"✅ 两个模型参数基本相同（仅有数值精度差异）\")\n",
    "    else:\n",
    "        print(\"⚠️ 两个模型存在显著参数差异\")\n",
    "    \n",
    "    return {\n",
    "        'identical': identical_count,\n",
    "        'minor_diff': different_count,\n",
    "        'significant_diff': significant_diff_count,\n",
    "        'total_common': len(common_params)\n",
    "    }\n",
    "\n",
    "# 使用示例\n",
    "# result = quick_model_comparison(model, sft_model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "1ef8c85c-68db-472f-b4f9-74eaccb39609",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "⚡ 快速模型比较\n",
      "========================================\n",
      "🔴 显著差异: transformer.h.3.attn.c_proj.bias (最大差异: 0.00513126)\n",
      "🔴 显著差异: transformer.h.5.mlp.c_fc.weight (最大差异: 0.01644957)\n",
      "🔴 显著差异: transformer.h.8.mlp.c_fc.bias (最大差异: 0.00733607)\n",
      "🔴 显著差异: transformer.h.1.attn.c_attn.bias (最大差异: 0.00756520)\n",
      "🔴 显著差异: transformer.h.9.mlp.c_proj.weight (最大差异: 0.01394147)\n",
      "\n",
      "📊 结果统计:\n",
      "   完全相同的层: 0\n",
      "   微小差异的层: 0\n",
      "   显著差异的层: 148\n",
      "⚠️ 两个模型存在显著参数差异\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'identical': 0, 'minor_diff': 0, 'significant_diff': 148, 'total_common': 148}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
    "model1 = AutoModelForCausalLM.from_pretrained('gpt2-sft')\n",
    "model2 = AutoModelForCausalLM.from_pretrained('gpt2-ppo-without-vhead')\n",
    "quick_model_comparison(model1, model2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55958bd0-27a8-494a-aff7-2a79859025ef",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
