{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:26:22.884644Z",
     "start_time": "2025-01-15T18:26:18.449883Z"
    }
   },
   "cell_type": "code",
   "source": "import torch",
   "id": "7999828180d8f258",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:26:22.893438Z",
     "start_time": "2025-01-15T18:26:22.888553Z"
    }
   },
   "cell_type": "code",
   "source": "# rotateMatrix = torch.load('1-10-1 (1).pth', weights_only=True)",
   "id": "604e2857af1ad8b4",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:26:22.983335Z",
     "start_time": "2025-01-15T18:26:22.978449Z"
    }
   },
   "cell_type": "code",
   "source": "# print(rotateMatrix.dtype)",
   "id": "5f03c00890f11db0",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:26:33.294163Z",
     "start_time": "2025-01-15T18:26:22.997016Z"
    }
   },
   "cell_type": "code",
   "source": [
     "from transformers import LlamaForCausalLM, LlamaTokenizer\n",
     "import torch\n",
     "from utils.model_path_getter import load_yaml\n",
     "\n",
     "# Resolve model and offload directories from the project YAML config.\n",
     "model_path = load_yaml()[\"model_path\"]\n",
     "offload_path = load_yaml()[\"offload_path\"]\n",
     "\n",
     "# Load the tokenizer that matches the checkpoint.\n",
     "tokenizer = LlamaTokenizer.from_pretrained(model_path)\n",
     "\n",
     "# Load the model in float16; device_map=\"auto\" shards it across the\n",
     "# available devices and spills overflow weights to offload_folder on disk\n",
     "# (the stderr warning below confirms some params end up on cpu/disk).\n",
     "model = LlamaForCausalLM.from_pretrained(\n",
     "    model_path,\n",
     "    device_map=\"auto\",  # let accelerate place layers automatically\n",
     "    offload_folder=offload_path,\n",
     "    torch_dtype=torch.float16,\n",
     ")"
   ],
   "id": "3cec739d2bb940cf",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Loading checkpoint shards:   0%|          | 0/2 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "59e3f2134d58401abde104873a90115a"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:root:Some parameters are on the meta device device because they were offloaded to the disk and cpu.\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T20:15:33.943005Z",
     "start_time": "2025-01-15T20:15:33.148142Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Load the rotation matrix and move it to CUDA as float32.\n",
     "# NOTE(review): the fp32 -> fp16 -> fp32 round-trip deliberately quantizes\n",
     "# the matrix to half precision — presumably to match the fp16 model\n",
     "# weights it will be folded into; TODO confirm this loss is intentional.\n",
     "# NOTE(review): the loaded tensor still tracks gradients (later output\n",
     "# shows grad_fn=<SliceBackward0>); consider .detach() for pure inference.\n",
     "rotateMatrix = (torch.load('../1-10-1 (1).pth', weights_only=True)\n",
     "                .to(torch.float16)\n",
     "                .to(torch.float32)\n",
     "                .to(torch.device('cuda')))"
   ],
   "id": "5df67de5f2c330e9",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:26:34.478067Z",
     "start_time": "2025-01-15T18:26:34.473180Z"
    }
   },
   "cell_type": "code",
   "source": "print(rotateMatrix.dtype)",
   "id": "bca46a461036ac6b",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.float32\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T20:15:37.490730Z",
     "start_time": "2025-01-15T20:15:36.183262Z"
    }
   },
   "cell_type": "code",
   "source": [
    "identity_matrix = torch.eye(rotateMatrix.size(0)).to(rotateMatrix.device)\n",
    "roted = torch.matmul(rotateMatrix, rotateMatrix.T)\n",
    "\n",
    "# 计算绝对误差\n",
    "absolute_error = torch.abs(roted - identity_matrix)\n",
    "\n",
    "# 找到最大差值的位置\n",
    "max_diff_index = torch.argmax(absolute_error)\n",
    "\n",
    "# 打印最大差值\n",
    "print(f\"Max absolute error: {absolute_error.max().item()}\")"
   ],
   "id": "51a09a40c2d61a17",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Max absolute error: 0.0002695322036743164\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:27:30.610254Z",
     "start_time": "2025-01-15T18:26:45.577654Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Sanity-check the algebraic properties expected of a rotation matrix.\n",
     "identity_matrix = torch.eye(rotateMatrix.size(0)).to(rotateMatrix.device)\n",
     "\n",
     "# Orthogonality: R @ R^T should equal I within fp tolerance.\n",
     "is_orthogonal = torch.allclose(torch.matmul(rotateMatrix, rotateMatrix.T), identity_matrix, atol=5e-4)\n",
     "print(f\"Is rotateMatrix orthogonal? {is_orthogonal}\")\n",
     "\n",
     "# A proper rotation has determinant +1.\n",
     "determinant = torch.det(rotateMatrix).item()\n",
     "print(f\"Determinant of rotateMatrix: {determinant}\")\n",
     "\n",
     "# For an orthogonal matrix, the inverse equals the transpose.\n",
     "inverse_matrix = torch.linalg.inv(rotateMatrix)\n",
     "is_inverse_equal_transpose = torch.allclose(inverse_matrix, rotateMatrix.T, atol=1e-3)\n",
     "print(f\"Is the inverse of rotateMatrix equal to its transpose? {is_inverse_equal_transpose}\")\n",
     "\n",
     "# Peek at the top-left 5x5 corner of the matrix.\n",
     "print(\"First 5 rows of rotateMatrix:\")\n",
     "print(rotateMatrix[:5, :5])\n",
     "\n"
   ],
   "id": "3ab5b17346d0e62c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Is rotateMatrix orthogonal? True\n",
      "Determinant of rotateMatrix: 0.9999485015869141\n",
      "Is the inverse of rotateMatrix equal to its transpose? True\n",
      "First 5 rows of rotateMatrix:\n",
      "tensor([[ 0.0056,  0.0010, -0.0062,  0.0225,  0.0089],\n",
      "        [-0.0049, -0.0059,  0.0020,  0.0063,  0.0036],\n",
      "        [ 0.0027, -0.0140, -0.0096, -0.0039,  0.0015],\n",
      "        [ 0.0082, -0.0058,  0.0140, -0.0012, -0.0001],\n",
      "        [ 0.0043,  0.0186, -0.0054, -0.0123, -0.0029]], device='cuda:0',\n",
      "       grad_fn=<SliceBackward0>)\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:27:30.642499Z",
     "start_time": "2025-01-15T18:27:30.637613Z"
    }
   },
   "cell_type": "code",
    "source": "# Cast the rotation back to float16 to match the model's fp16 weights.\nrotateMatrix = rotateMatrix.to(torch.float16)",
   "id": "d9bf77c81fe6de70",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:27:32.185471Z",
     "start_time": "2025-01-15T18:27:30.823800Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# ✅ 修改 down_proj 的权重和输入值\n",
    "for i, layer in enumerate(model.model.layers):\n",
    "    if i == 0:\n",
    "        down_proj = layer.mlp.down_proj\n",
    "        original_weight = down_proj.weight.data\n",
    "        \n",
    "        print(original_weight.dtype)\n",
    "        \n",
    "        # 打印原始权重的均值和标准差\n",
    "        print(f\"Original weight mean: {original_weight.mean().item()}, std: {original_weight.std().item()}\")\n",
    "\n",
    "        # ✅ 修改权重：插入反旋转矩阵\n",
    "        modified_weight = torch.matmul(original_weight, rotateMatrix.T)\n",
    "        down_proj.weight.data = modified_weight\n",
    "\n",
    "        # 打印修改后的权重均值和标准差\n",
    "        print(f\"Modified weight mean: {down_proj.weight.data.mean().item()}, std: {down_proj.weight.data.std().item()}\")\n"
   ],
   "id": "59586802607d4fdb",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.float16\n",
      "Original weight mean: -2.0265579223632812e-06, std: 0.016815185546875\n",
      "Modified weight mean: -2.384185791015625e-07, std: 0.016815185546875\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:27:32.371128Z",
     "start_time": "2025-01-15T18:27:32.363311Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Forward pre-hook: rotate the input of down_proj (x -> x @ R).\n",
     "# Intended to pair with a compensating transform applied to the layer's\n",
     "# weight so that, together, the layer output is unchanged.\n",
     "def modify_down_proj_input(module, input):\n",
     "    original_input = input[0]\n",
     "    modified_input = torch.matmul(original_input, rotateMatrix)\n",
     "\n",
     "    # Log input statistics before/after the rotation.\n",
     "    print(f\"Original input mean: {original_input.mean().item()}, std: {original_input.std().item()}\")\n",
     "    print(f\"Modified input mean: {modified_input.mean().item()}, std: {modified_input.std().item()}\")\n",
     "\n",
     "    return (modified_input,)"
   ],
   "id": "1df9ce2c32a4fe17",
   "outputs": [],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T18:39:02.102295Z",
     "start_time": "2025-01-15T18:27:32.548529Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\n",
    "# ✅ 注册钩子到 down_proj 层\n",
    "model.model.layers[0].mlp.down_proj.register_forward_pre_hook(modify_down_proj_input)\n",
    "\n",
    "# ✅ 准备输入数据\n",
    "prompts = [\n",
    "    \"Artificial intelligence is transforming the way we interact with technology.\"\n",
    "]\n",
    "inputs = tokenizer(prompts, return_tensors=\"pt\", truncation=True, max_length=128)\n",
    "\n",
    "# ✅ 确保输入张量在正确的设备上并转换为 float16\n",
    "inputs = {key: value.to(model.device) for key, value in inputs.items()}\n",
    "inputs[\"input_ids\"] = inputs[\"input_ids\"].to(torch.long)\n",
    "inputs[\"labels\"] = inputs[\"input_ids\"]\n",
    "\n",
    "# ✅ 禁用梯度计算，进行推理\n",
    "with torch.no_grad():\n",
    "    outputs = model(**inputs)\n",
    "\n",
    "# ✅ 计算 PPL\n",
    "loss = outputs.loss / inputs[\"input_ids\"].size(1)  # 按 token 数量归一化\n",
    "ppl = torch.exp(loss).item()\n",
    "print(f\"Perplexity: {ppl}\")\n",
    "\n",
    "loss = outputs.loss  # 按 token 数量归一化\n",
    "ppl = torch.exp(loss).item()\n",
    "print(f\"Perplexity: {ppl}\")\n",
    "\n",
    "# ✅ 生成文本\n",
    "with torch.no_grad():\n",
    "    generated_ids = model.generate(\n",
    "        inputs[\"input_ids\"]\n",
    "    )\n",
    "\n",
    "# ✅ 解码生成的文本\n",
    "generated_texts = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)\n",
    "\n",
    "print(\"prompts:\", prompts)\n",
    "# ✅ 打印生成的文本\n",
    "print(\"Generated text:\", generated_texts)\n"
   ],
   "id": "a340d3a9a66061df",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Original input mean: -8.821487426757812e-06, std: 0.011688232421875\n",
      "Modified input mean: 1.5914440155029297e-05, std: 0.01168060302734375\n",
      "Perplexity: 1.5194346904754639\n",
      "Perplexity: 531.1659545898438\n",
      "Original input mean: -8.821487426757812e-06, std: 0.011688232421875\n",
      "Modified input mean: 1.5914440155029297e-05, std: 0.01168060302734375\n",
      "Original input mean: -3.707408905029297e-05, std: 0.005428314208984375\n",
      "Modified input mean: 2.4437904357910156e-06, std: 0.005428314208984375\n",
      "Original input mean: -5.4955482482910156e-05, std: 0.00617218017578125\n",
      "Modified input mean: 2.467632293701172e-05, std: 0.00617218017578125\n",
      "Original input mean: 2.2113323211669922e-05, std: 0.005016326904296875\n",
      "Modified input mean: 7.009506225585938e-05, std: 0.00501251220703125\n",
      "Original input mean: -6.097555160522461e-05, std: 0.0048980712890625\n",
      "Modified input mean: 1.1324882507324219e-05, std: 0.0048980712890625\n",
      "Original input mean: 3.892183303833008e-05, std: 0.00479888916015625\n",
      "Modified input mean: 4.2319297790527344e-05, std: 0.00479888916015625\n",
      "Original input mean: -6.091594696044922e-05, std: 0.005077362060546875\n",
      "Modified input mean: -3.635883331298828e-06, std: 0.005077362060546875\n",
      "Original input mean: -1.913309097290039e-05, std: 0.004161834716796875\n",
      "Modified input mean: -1.5437602996826172e-05, std: 0.004161834716796875\n",
      "Original input mean: -5.441904067993164e-05, std: 0.004150390625\n",
      "Modified input mean: -3.933906555175781e-06, std: 0.004150390625\n",
      "Original input mean: -5.167722702026367e-05, std: 0.00527191162109375\n",
      "Modified input mean: 9.5367431640625e-06, std: 0.00527191162109375\n",
      "Original input mean: 6.54458999633789e-05, std: 0.005008697509765625\n",
      "Modified input mean: 2.6941299438476562e-05, std: 0.005008697509765625\n",
      "Original input mean: 7.587671279907227e-05, std: 0.0051422119140625\n",
      "Modified input mean: 4.172325134277344e-06, std: 0.0051422119140625\n",
      "Original input mean: 2.6047229766845703e-05, std: 0.0030841827392578125\n",
      "Modified input mean: -1.0371208190917969e-05, std: 0.0030841827392578125\n",
      "Original input mean: 2.2649765014648438e-06, std: 0.0031280517578125\n",
      "Modified input mean: 2.7418136596679688e-06, std: 0.0031280517578125\n",
      "Original input mean: 6.192922592163086e-05, std: 0.00506591796875\n",
      "Modified input mean: -7.510185241699219e-06, std: 0.00506591796875\n",
      "Original input mean: 4.291534423828125e-06, std: 0.0027923583984375\n",
      "Modified input mean: -1.2934207916259766e-05, std: 0.0027923583984375\n",
      "Original input mean: -6.836652755737305e-05, std: 0.00518035888671875\n",
      "Modified input mean: -3.039836883544922e-06, std: 0.00518035888671875\n",
      "Original input mean: -3.17692756652832e-05, std: 0.003276824951171875\n",
      "Modified input mean: -1.4185905456542969e-05, std: 0.003276824951171875\n",
      "Original input mean: 5.817413330078125e-05, std: 0.004955291748046875\n",
      "Modified input mean: -4.76837158203125e-06, std: 0.004955291748046875\n",
      "Original input mean: 8.940696716308594e-07, std: 0.0028533935546875\n",
      "Modified input mean: -1.4901161193847656e-05, std: 0.0028533935546875\n",
      "Original input mean: 6.103515625e-05, std: 0.00521087646484375\n",
      "Modified input mean: -2.5033950805664062e-05, std: 0.00521087646484375\n",
      "Original input mean: -3.701448440551758e-05, std: 0.00322723388671875\n",
      "Modified input mean: -2.580881118774414e-05, std: 0.00322723388671875\n",
      "Original input mean: 5.894899368286133e-05, std: 0.005336761474609375\n",
      "Modified input mean: -3.4749507904052734e-05, std: 0.005336761474609375\n",
      "Original input mean: 6.389617919921875e-05, std: 0.005584716796875\n",
      "Modified input mean: -4.64320182800293e-05, std: 0.005584716796875\n",
      "Original input mean: -2.0265579223632812e-06, std: 0.0030689239501953125\n",
      "Modified input mean: -1.621246337890625e-05, std: 0.0030689239501953125\n",
      "Original input mean: 6.580352783203125e-05, std: 0.0056915283203125\n",
      "Modified input mean: -5.269050598144531e-05, std: 0.0056915283203125\n",
      "Original input mean: 7.069110870361328e-05, std: 0.006031036376953125\n",
      "Modified input mean: -6.264448165893555e-05, std: 0.006031036376953125\n",
      "Original input mean: 7.581710815429688e-05, std: 0.006420135498046875\n",
      "Modified input mean: -7.2479248046875e-05, std: 0.006420135498046875\n",
      "Original input mean: 8.028745651245117e-05, std: 0.00677490234375\n",
      "Modified input mean: -8.022785186767578e-05, std: 0.00677490234375\n",
      "Original input mean: 8.32676887512207e-05, std: 0.00701141357421875\n",
      "Modified input mean: -8.404254913330078e-05, std: 0.00701141357421875\n",
      "Original input mean: 8.45193862915039e-05, std: 0.0071563720703125\n",
      "Modified input mean: -8.529424667358398e-05, std: 0.0071563720703125\n",
      "Original input mean: 8.487701416015625e-05, std: 0.007293701171875\n",
      "Modified input mean: -8.666515350341797e-05, std: 0.007293701171875\n",
      "Original input mean: 8.660554885864258e-05, std: 0.007503509521484375\n",
      "Modified input mean: -8.982419967651367e-05, std: 0.007503509521484375\n",
      "Original input mean: 9.113550186157227e-05, std: 0.007785797119140625\n",
      "Modified input mean: -9.387731552124023e-05, std: 0.007785797119140625\n",
      "Original input mean: 9.649991989135742e-05, std: 0.008026123046875\n",
      "Modified input mean: -9.638071060180664e-05, std: 0.008026123046875\n",
      "Generated text: ['Artificial intelligence is transforming the way we interact with technology.\\nJO\\ufeffO\\ufeff\\nA\\ufeffOO03O0\\ufeff2O0O5OO0OOOOOOOOOO\\n']\n"
     ]
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-01-15T19:58:49.794475Z",
     "start_time": "2025-01-15T19:58:44.274586Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "\n",
    "tensor = torch.tensor([1, 2, 3, 4, 5])\n",
    "print(tensor * 10)"
   ],
   "id": "d043deb604c3970a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([10, 20, 30, 40, 50])\n"
     ]
    }
   ],
   "execution_count": 1
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:llama_env] *",
   "language": "python",
   "name": "conda-env-llama_env-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
