{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "aabe23a4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'S97.85=-86.73+95.16+89.42E'"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "%run common.ipynb\n",
    "\n",
    "# Sanity check: sample one random arithmetic problem (with a third operand)\n",
    "# and decode it back to text to confirm the tokenizer round-trip.\n",
    "tokenizer.decode(tokenizer.get_data(third_number=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "0e940d0b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "({'input_ids': tensor([[ 1, 15,  5,  ...,  8,  2,  0],\n",
       "          [ 1, 15,  9,  ...,  4,  2,  0],\n",
       "          [ 1, 15,  6,  ...,  8,  2,  0],\n",
       "          ...,\n",
       "          [ 1, 15,  5,  ..., 11,  2,  0],\n",
       "          [ 1,  5,  5,  ...,  6, 11,  2],\n",
       "          [ 1, 15, 12,  ...,  0,  0,  0]], device='cuda:0'),\n",
       "  'attention_mask': tensor([[1, 1, 1,  ..., 1, 1, 0],\n",
       "          [1, 1, 1,  ..., 1, 1, 0],\n",
       "          [1, 1, 1,  ..., 1, 1, 0],\n",
       "          ...,\n",
       "          [1, 1, 1,  ..., 1, 1, 0],\n",
       "          [1, 1, 1,  ..., 1, 1, 1],\n",
       "          [1, 1, 1,  ..., 0, 0, 0]], device='cuda:0'),\n",
       "  'label': tensor([[-100, -100, -100,  ...,    8,    2, -100],\n",
       "          [-100, -100, -100,  ...,    4,    2, -100],\n",
       "          [-100, -100, -100,  ...,    8,    2, -100],\n",
       "          ...,\n",
       "          [-100, -100, -100,  ...,   11,    2, -100],\n",
       "          [-100, -100, -100,  ...,    6,   11,    2],\n",
       "          [-100, -100, -100,  ..., -100, -100, -100]], device='cuda:0')},\n",
       " {'input_ids': tensor([[ 1, 15,  5,  ...,  0,  0,  0],\n",
       "          [ 1, 15,  9,  ...,  0,  0,  0],\n",
       "          [ 1, 15,  6,  ...,  0,  0,  0],\n",
       "          ...,\n",
       "          [ 1, 15,  5,  ...,  0,  0,  0],\n",
       "          [ 1,  5,  5,  ...,  0,  0,  0],\n",
       "          [ 1, 15, 12,  ...,  0,  0,  0]], device='cuda:0'),\n",
       "  'attention_mask': tensor([[1, 1, 1,  ..., 0, 0, 0],\n",
       "          [1, 1, 1,  ..., 0, 0, 0],\n",
       "          [1, 1, 1,  ..., 0, 0, 0],\n",
       "          ...,\n",
       "          [1, 1, 1,  ..., 0, 0, 0],\n",
       "          [1, 1, 1,  ..., 0, 0, 0],\n",
       "          [1, 1, 1,  ..., 0, 0, 0]], device='cuda:0'),\n",
       "  'label': tensor([[-100, -100, -100,  ..., -100, -100, -100],\n",
       "          [-100, -100, -100,  ..., -100, -100, -100],\n",
       "          [-100, -100, -100,  ..., -100, -100, -100],\n",
       "          ...,\n",
       "          [-100, -100, -100,  ..., -100, -100, -100],\n",
       "          [-100, -100, -100,  ..., -100, -100, -100],\n",
       "          [-100, -100, -100,  ..., -100, -100, -100]], device='cuda:0')})"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_batch_data():\n",
    "\n",
    "    def pad(data, split, lens):\n",
    "        # Blank slate: a (batch, lens) grid pre-filled with the pad token 'P'\n",
    "        input_ids = torch.full((len(data), lens),\n",
    "                               tokenizer.encoder['P'],\n",
    "                               device=device)\n",
    "\n",
    "        # Paste each sequence into the grid, left-aligned\n",
    "        for i, d in enumerate(data):\n",
    "            input_ids[i, :len(d)] = torch.LongTensor(d)\n",
    "\n",
    "        attention_mask = (input_ids != tokenizer.encoder['P']).long()\n",
    "\n",
    "        # Build the labels\n",
    "        label = input_ids.clone()\n",
    "        for l, s in zip(label, split):\n",
    "            # Question tokens and pad positions are set to -100 (ignored by the loss)\n",
    "            l[:s] = -100\n",
    "            l[l == tokenizer.encoder['P']] = -100\n",
    "\n",
    "        return {\n",
    "            'input_ids': input_ids,\n",
    "            'attention_mask': attention_mask,\n",
    "            'label': label\n",
    "        }\n",
    "\n",
    "    # \"Chosen\" samples: correct question-and-answer pairs\n",
    "    choice = [tokenizer.get_data(third_number=True) for i in range(64)]\n",
    "\n",
    "    # \"Rejected\" samples: simply defined as an empty answer,\n",
    "    # i.e. the question up to and including '=' followed by the end token 'E'\n",
    "    split = [i.index(tokenizer.encoder['=']) + 1 for i in choice]\n",
    "    reject = [d[:s] for d, s in zip(choice, split)]\n",
    "    reject = [i + [tokenizer.encoder['E']] for i in reject]\n",
    "\n",
    "    # Max sequence length over the chosen batch;\n",
    "    # the rejected batch is padded to the same width\n",
    "    lens = max([len(i) for i in choice])\n",
    "\n",
    "    return pad(choice, split, lens), pad(reject, split, lens)\n",
    "\n",
    "\n",
    "get_batch_data()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "b14b5c41",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/anaconda3/envs/pt2/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "ModelGEN(\n",
       "  (feature): LlamaModel(\n",
       "    (embed_tokens): Embedding(22, 64)\n",
       "    (layers): ModuleList(\n",
       "      (0-3): 4 x LlamaDecoderLayer(\n",
       "        (self_attn): LlamaAttention(\n",
       "          (q_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (k_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (v_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (o_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (rotary_emb): LlamaRotaryEmbedding()\n",
       "        )\n",
       "        (mlp): LlamaMLP(\n",
       "          (gate_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (up_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (down_proj): Linear(in_features=64, out_features=64, bias=False)\n",
       "          (act_fn): SiLUActivation()\n",
       "        )\n",
       "        (input_layernorm): LlamaRMSNorm()\n",
       "        (post_attention_layernorm): LlamaRMSNorm()\n",
       "      )\n",
       "    )\n",
       "    (norm): LlamaRMSNorm()\n",
       "  )\n",
       "  (fc_out): Linear(in_features=64, out_features=22, bias=False)\n",
       ")"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Policy model to be tuned with DPO, loaded from the 'gen.model' checkpoint.\n",
    "model_dpo = torch.load('gen.model')\n",
    "model_dpo.to(device)\n",
    "model_dpo.train()\n",
    "\n",
    "# Reference model: a second copy of the same checkpoint. Its parameters are\n",
    "# never handed to the optimizer below, so it stays fixed during training.\n",
    "# NOTE(review): it is left in train() mode; eval() is the usual choice for a\n",
    "# DPO reference model -- confirm this is intended.\n",
    "model_dpo_ref = torch.load('gen.model')\n",
    "model_dpo_ref.to(device)\n",
    "model_dpo_ref.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "c35e3b28",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([-39.9965, -35.2345, -36.4901, -38.7378, -40.6091, -34.2576, -42.3944,\n",
       "        -33.0563, -34.9951, -41.1052, -31.9088, -49.1035, -36.8061, -44.1473,\n",
       "        -49.0926, -34.7165, -37.8078, -43.8499, -38.8814, -36.5278, -37.6655,\n",
       "        -35.2453, -33.1654, -32.6738, -31.2117, -42.3589, -42.8478, -37.1372,\n",
       "        -35.1684, -35.3945, -41.5367, -38.2373, -39.0609, -39.1670, -42.1039,\n",
       "        -38.2436, -35.9996, -38.0521, -34.7511, -34.5437, -54.1977, -32.8421,\n",
       "        -40.8836, -36.5542, -34.1246, -34.0929, -36.6590, -31.7397, -29.0101,\n",
       "        -35.5826, -40.0470, -40.6871, -38.7898, -36.2228, -34.8132, -40.4356,\n",
       "        -36.8790, -42.0786, -31.5971, -44.0202, -33.5269, -45.4662, -35.5565,\n",
       "        -34.9628], device='cuda:0', grad_fn=<SubBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def get_prob_log(model, choice, reject):\n",
    "    b = choice['input_ids'].shape[0]\n",
    "\n",
    "    # Concatenate chosen and rejected along the batch dim so a single\n",
    "    # forward pass scores both halves.\n",
    "    # [2b, seq]\n",
    "    input_ids = torch.cat([choice['input_ids'], reject['input_ids']], dim=0)\n",
    "    attention_mask = torch.cat(\n",
    "        [choice['attention_mask'], reject['attention_mask']], dim=0)\n",
    "    label = torch.cat([choice['label'], reject['label']], dim=0)\n",
    "\n",
    "    # [2b, seq, vocab]\n",
    "    out = model(input_ids=input_ids, attention_mask=attention_mask)\n",
    "\n",
    "    # Shift by one position so the logits at step t are scored against\n",
    "    # the token at step t+1.\n",
    "    # [2b, seq-1]\n",
    "    label = label[:, 1:]\n",
    "    # [2b, seq-1, vocab]\n",
    "    out = out[:, :-1]\n",
    "\n",
    "    # Per-token log-probabilities (epsilon avoids log(0)); logs are used so\n",
    "    # the joint sequence probability becomes a sum.\n",
    "    out = (out.softmax(2) + 1e-8).log()\n",
    "\n",
    "    # Gather the log-prob assigned to each label token.\n",
    "    # gather() indices must be non-negative, so map the -100 fill to 0;\n",
    "    # those positions are masked out below anyway.\n",
    "    index = label.clone().unsqueeze(2)\n",
    "    index[index == -100] = 0\n",
    "    prob = out.gather(2, index=index).squeeze(2)\n",
    "\n",
    "    # Keep only answer positions, then sum the log-probs over the sequence.\n",
    "    prob = (prob * (label != -100)).sum(1)\n",
    "\n",
    "    # Per-sample margin: chosen log-prob minus rejected log-prob.\n",
    "    return prob[:b] - prob[b:]\n",
    "\n",
    "\n",
    "get_prob_log(model_dpo, *get_batch_data())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "fb049323",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 S101.71=95.85+46.75E\n",
      "2000 S3403.86=44.89*83.31+51.47E\n",
      "4000 S55.84=66.64/-91.22+57.96E\n",
      "6000 S-767.08=-69.40*16.22+57.81E\n",
      "8000 S162.65=80.69--83.36+39.43E\n",
      "10000 S16.15=-33.63+-57.77+90.64E\n",
      "12000 S-68.70=58.57/31.96+-60.09E\n",
      "14000 S-51.03=-45.60+47.22+-49.87E\n",
      "16000 S-51.65=-96.99/88.35+-50.66E\n",
      "18000 S30.26=-85.70/44.87+32.50E\n",
      "20000 S2164.24=47.01*51.23+-71.68E\n",
      "22000 S1113.01=-95.91*-11.21+66.08E\n",
      "24000 S1202.65=68.13*17.33+81.90E\n",
      "26000 S-33.20=-52.21*4.71+32.87E\n",
      "28000 S-5469.87=-63.48*78.04+93.87E\n",
      "30000 S-25.27=66.50/-4.51+-12.73E\n",
      "32000 S31.61=-77.76/24.99+34.24E\n",
      "34000 S-2575.25=-43.91*56.71+90.36E\n",
      "36000 S100.97=89.83+-1.77+13.94E\n",
      "38000 S-314.49=-27.89*21.38+-85.69E\n",
      "40000 S-133.87=-64.75+-43.61+-21.59E\n",
      "42000 S-54.38=-44.74+-60.99+54.11E\n",
      "44000 S-100.67=-81.50--52.17+-79.47E\n",
      "46000 S54.37=4.65--96.74+-44.74E\n",
      "48000 S-87.06=8.80--36.98+-99.28E\n",
      "50000 S-127.15=4.63+-96.55+-46.92E\n",
      "52000 S29.78=-32.81+-12.72+71.03E\n",
      "54000 S47.74=-37.14/13.68+50.34E\n",
      "56000 S-451.05=-57.42*6.12+-30.62E\n",
      "58000 S-214.12=-51.91+-73.47+-90.49E\n",
      "60000 S-190.42=-26.75-92.20+-71.75E\n",
      "62000 S32.17=1.49-2.31+35.90E\n",
      "64000 S-129.23=-85.47+-31.55+-12.58E\n",
      "66000 S-1355.33=-72.28*18.62+-11.91E\n",
      "68000 S-45.40=-11.20+39.81+-72.09E\n",
      "70000 S179.98=82.25--50.66+47.76E\n",
      "72000 S-111.60=-97.78--47.64+-60.38E\n",
      "74000 S-571.16=-68.62*8.74+13.16E\n",
      "76000 S-35.64=38.59+-62.27+-11.37E\n",
      "78000 S254.23=82.64+98.11+67.75E\n",
      "80000 S-36.61=-24.28+39.57+-49.08E\n",
      "82000 S89.79=-1.31/-87.82+89.40E\n",
      "84000 S-88.96=69.60/87.36+-89.15E\n",
      "86000 S75.96=35.99/-8.33+81.43E\n",
      "88000 S152.49=86.12--95.23+-32.62E\n",
      "90000 S-3129.25=-82.83*36.74+77.79E\n",
      "92000 S-117.82=-10.25+-37.28+-68.25E\n",
      "94000 S-161.25=-78.26+-60.95+-26.39E\n",
      "96000 S89.60=69.52+63.09+-42.26E\n",
      "98000 S653.50=75.07*7.21+8.89E\n",
      "100000 S-6.98=-34.43/-9.55+-10.54E\n",
      "102000 S111.73=30.65*8.32+-85.59E\n",
      "104000 S-2031.96=-35.95*57.56+64.16E\n",
      "106000 S30.43=4.41-32.30+61.42E\n",
      "108000 S-38.75=-10.96+33.25+-59.84E\n",
      "110000 S196.23=47.30--86.87+64.16E\n",
      "112000 S255.10=80.16+89.92+82.48E\n",
      "114000 S-981.37=-67.27*13.36+-94.50E\n",
      "116000 S63.78=-96.11/92.51+64.77E\n",
      "118000 S-72.39=13.47/-44.47+-71.77E\n",
      "120000 S16.74=-58.43+-19.96+96.51E\n",
      "122000 S-5764.99=96.02*-61.54+42.40E\n",
      "124000 S186.06=56.78*1.26+89.35E\n",
      "126000 S-47.30=-78.25/-94.62+-48.79E\n",
      "128000 S-88.34=-31.47/-96.69+-88.40E\n",
      "130000 S-26.96=-83.99-4.81+61.09E\n",
      "132000 S-178.17=-93.13+-26.74+-57.86E\n",
      "134000 S46.42=4.93--83.85+-44.69E\n",
      "136000 S-18.38=-56.29-20.27+56.99E\n",
      "138000 S-91.72=-4.83-10.79+-75.67E\n",
      "140000 S-98.35=50.36+-92.44+-55.10E\n",
      "142000 S132.20=42.82+12.10+76.84E\n",
      "144000 S-23.55=-1.05-70.08+50.88E\n",
      "146000 S4163.16=98.89*42.55+-65.05E\n",
      "148000 S2.76=-17.90/-11.65+0.39E\n",
      "150000 S-5.25=46.78/68.82+-5.31E\n",
      "152000 S-12.24=-60.32/73.52+-11.30E\n",
      "154000 S-49.86=-78.58+71.63+-41.52E\n",
      "156000 S3165.32=94.22*33.59+-7.98E\n",
      "158000 S-1698.60=21.45*-81.96+94.76E\n",
      "160000 S196.85=81.90--86.63+30.87E\n",
      "162000 S1281.46=18.06*74.30+-65.71E\n",
      "164000 S-147.64=-53.33-95.61+1.50E\n",
      "166000 S-1639.97=28.73*-57.07+18.33E\n",
      "168000 S100.91=16.39+12.98+68.96E\n",
      "170000 S-63.10=-49.48+39.54+-53.01E\n",
      "172000 S-97.32=-9.24-27.97+-59.72E\n",
      "174000 S122.62=12.27+72.14+36.86E\n",
      "176000 S26.37=33.95/61.34+25.40E\n",
      "178000 S-100.12=57.53/-48.50+-99.32E\n",
      "180000 S-53.98=18.08-40.57+-31.69E\n",
      "182000 S-2595.04=-78.73*32.99+54.03E\n",
      "184000 S26.96=-0.47+91.37+-63.09E\n",
      "186000 S90.24=-1.59/10.38+90.78E\n",
      "188000 S44.72=45.66+80.34+-78.25E\n",
      "190000 S83.12=-50.42--75.60+57.17E\n",
      "192000 S-8.79=10.58+-99.26+77.74E\n",
      "194000 S117.00=78.40+2.21+36.48E\n",
      "196000 S122.16=88.95-23.52+56.60E\n",
      "198000 S-36.82=-7.02*23.49+71.74E\n"
     ]
    }
   ],
   "source": [
    "optimizer = torch.optim.Adam(model_dpo.parameters(),\n",
    "                             lr=1e-4,\n",
    "                             betas=(0.9, 0.999),\n",
    "                             eps=1e-8)\n",
    "\n",
    "for i in range(20_0000):\n",
    "    choice, reject = get_batch_data()\n",
    "\n",
    "    # Log-prob margins (chosen minus rejected) from the policy and,\n",
    "    # without gradients, from the reference model (never optimized).\n",
    "    prob_log = get_prob_log(model_dpo, choice, reject)\n",
    "    with torch.no_grad():\n",
    "        prob_log_ref = get_prob_log(model_dpo_ref, choice, reject)\n",
    "\n",
    "    # Negated, beta-scaled (beta = 0.1) difference of the two margins.\n",
    "    # NOTE(review): named \"kl\" but this is the (negated) DPO logit,\n",
    "    # not a true KL divergence term.\n",
    "    kl = -0.1 * (prob_log - prob_log_ref)\n",
    "\n",
    "    # Loss from that quantity. NOTE(review): textbook DPO minimizes\n",
    "    # -log(sigmoid(beta * diff)); here log(sigmoid(-beta * diff)) is\n",
    "    # minimized instead -- same monotone direction for the margin, but a\n",
    "    # different gradient shape. Confirm this is intended.\n",
    "    loss = (kl.sigmoid() + 1e-8).log().mean()\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    # Every 2000 steps: sample a fresh question (text up to and including\n",
    "    # '=') and print the model's generated completion.\n",
    "    if i % 2000 == 0:\n",
    "        question = tokenizer.get_data(third_number=True)\n",
    "        question = question[:question.index(tokenizer.encoder['=']) + 1]\n",
    "        question = torch.LongTensor(question).unsqueeze(0).to(device)\n",
    "\n",
    "        gen = generate(model_dpo, question)\n",
    "        print(i, tokenizer.decode(gen[0].tolist()))\n",
    "\n",
    "model_dpo.to('cpu')\n",
    "torch.save(model_dpo, 'dpo.model')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:pt2]",
   "language": "python",
   "name": "conda-env-pt2-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
