{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# step1 导入相关包\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Replace every nn.Linear inside a module with a LoRA-wrapped linear layer.\n",
    "def replace_linear_with_lora(\n",
    "  module: torch.nn.Module,\n",
    "  r: int = 4,\n",
    "  lora_alpha: int = 16,\n",
    "  lora_dropout: float = 0.0,\n",
    "  embed_requires_grad: bool = False,  # train embedding layers?\n",
    "  norm_requires_grad: bool = False,  # train norm layers?\n",
    "  head_requires_grad: bool = True,  # train the lm_head layer?\n",
    "  test_mode: bool = False,  # test mode: controls whether lora_B starts as all zeros\n",
    ") -> None:\n",
    "  \"\"\"Recursively find all linear layers in `module` and replace them in place.\n",
    "\n",
    "  Modules whose name contains 'embed', 'norm' or 'lm_head' are NOT replaced;\n",
    "  only the `requires_grad` flag of their parameters is set from the matching\n",
    "  `*_requires_grad` argument. All other `torch.nn.Linear` children are swapped\n",
    "  for `LoraLinear` wrappers (QLoRA-style); everything else is recursed into.\n",
    "  \"\"\"\n",
    "  for name, child in module.named_children():\n",
    "    # Handle the special layers first: lm_head is itself a Linear, so it must\n",
    "    # be matched by name before the isinstance(..., Linear) branch below.\n",
    "    if any(s in name for s in ['embed', 'norm', 'lm_head']):\n",
    "      requires_grad = (\n",
    "        embed_requires_grad if 'embed' in name\n",
    "        else norm_requires_grad if 'norm' in name\n",
    "        else head_requires_grad\n",
    "      )\n",
    "      for param in child.parameters():\n",
    "        param.requires_grad = requires_grad\n",
    "    # Replace every remaining plain linear layer (QLoRA approach).\n",
    "    elif isinstance(child, torch.nn.Linear):\n",
    "      lora_linear = LoraLinear(\n",
    "        child, r=r, alpha=lora_alpha, dropout_p=lora_dropout, test_mode=test_mode\n",
    "      )\n",
    "      setattr(module, name, lora_linear)\n",
    "    else:\n",
    "      # Recurse into the child until every linear layer has been replaced.\n",
    "      replace_linear_with_lora(\n",
    "        child,\n",
    "        r=r,\n",
    "        lora_alpha=lora_alpha,\n",
    "        lora_dropout=lora_dropout,\n",
    "        embed_requires_grad=embed_requires_grad,\n",
    "        norm_requires_grad=norm_requires_grad,\n",
    "        head_requires_grad=head_requires_grad,\n",
    "        test_mode=test_mode,\n",
    "      )\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check of the name-matching logic used in replace_linear_with_lora:\n",
    "# which requires_grad flag would apply to a given parameter name?\n",
    "name = \"transformer.layer.0.attention.weight\"\n",
    "\n",
    "requires_grad = ''\n",
    "if any(s in name for s in ['embed', 'norm', 'lm_head']):\n",
    "  print(f\"name: {name}\")\n",
    "  if 'embed' in name:\n",
    "    requires_grad = 'embed_requires_grad'\n",
    "  elif 'norm' in name:\n",
    "    requires_grad = 'norm_requires_grad'\n",
    "  else:\n",
    "    requires_grad = 'head_requires_grad'\n",
    "else:\n",
    "  # Name matches none of the special layers -> it would hit the Linear/recurse branches.\n",
    "  requires_grad = 'unknown'\n",
    "\n",
    "print(requires_grad)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.5"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "340a546b2c4c9cf5d23eb4a2a4e78e923e0b7afe4d00162258c4442b3ee3b061"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
