# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
import math
import torch
from torch import nn
import torch.nn.functional as F
from deepspeed.compression.helper import recursive_getattr, recursive_setattr
import deepspeed


class LinearLayer_LoRA(nn.Module):
    """A simple LoRA (Low-Rank Adaptation) wrapper for a linear layer's weight.

    Only Linear layers are supported for now. The base ``weight`` (and optional
    ``bias``) are frozen; training updates only the two low-rank factors
    ``lora_right_weight`` (in_features x lora_dim) and ``lora_left_weight``
    (lora_dim x out_features), whose scaled product is added to the base
    projection in ``forward`` (or fused directly into ``weight``).
    """

    def __init__(self,
                 weight,
                 lora_dim=0,
                 lora_scaling=1,
                 lora_droppout=0,
                 bias=None):
        """
        Args:
            weight: the base linear weight tensor/parameter to adapt (frozen).
            lora_dim: rank of the low-rank decomposition; must be > 0.
            lora_scaling: numerator of the LoRA scaling factor
                (effective scale is ``lora_scaling / lora_dim``).
            lora_droppout: dropout probability applied to the LoRA branch input
                (name kept as-is for backward compatibility with callers).
            bias: optional bias tensor, passed through unchanged.

        Raises:
            ValueError: if ``lora_dim`` is not a positive integer.
        """
        super(LinearLayer_LoRA, self).__init__()
        self.weight = weight
        self.bias = bias

        if lora_dim <= 0:
            raise ValueError(
                "You are trying to use LoRA, whose reduced dim should be larger than 0"
            )

        try:
            # Under ZeRO stage 3 the weight is partitioned across devices, so
            # the true shape is only available via DeepSpeed's `ds_shape`.
            rows, columns = weight.ds_shape
        except AttributeError:
            # Narrowed from a bare `except:`; the probe can only fail by the
            # attribute being absent on a regular (non-ZeRO-3) tensor.
            rows, columns = weight.shape
        # `lora_right_weight` is stored transposed (columns x lora_dim) so
        # forward() can use plain matmuls without extra transposes.
        self.lora_right_weight = nn.Parameter(torch.zeros(columns, lora_dim))
        self.lora_left_weight = nn.Parameter(torch.zeros(lora_dim, rows))
        # Scaling factor controlling how strongly the LoRA update contributes.
        self.lora_scaling = lora_scaling / lora_dim

        if lora_droppout > 0:
            self.lora_dropout = nn.Dropout(lora_droppout)
        else:
            # Identity keeps forward() uniform when dropout is disabled.
            self.lora_dropout = nn.Identity()
        # Initialize the LoRA factors (see reset_parameters for the rationale).
        self.reset_parameters()
        # Freeze the original weight; only the LoRA factors are trained.
        self.weight.requires_grad = False
        # Whether the LoRA delta is currently fused into `self.weight`.
        self.fuse_lora = False

    def eval(self):
        # Only the dropout submodule needs mode switching here.
        # NOTE(review): intentionally does NOT call nn.Module.eval() and does
        # not return self, unlike the stock nn.Module contract — kept as-is.
        self.lora_dropout.eval()

    #   self.fuse_lora_weight()

    def train(self, mode=True):
        # Same deliberate narrowing as eval(): only toggle the dropout module.
        self.lora_dropout.train(mode)
        # self.unfuse_lora_weight()

    def reset_parameters(self):
        # Kaiming-uniform init for the "A" factor keeps useful gradient signal,
        # while zero-initializing the "B" factor makes the initial LoRA delta
        # exactly zero, so the wrapped layer starts identical to the original.
        nn.init.kaiming_uniform_(self.lora_right_weight, a=math.sqrt(5))
        nn.init.zeros_(self.lora_left_weight)

    def fuse_lora_weight(self):
        """Add the scaled low-rank product into the base weight in place.

        Idempotent: does nothing if the delta is already fused.
        """
        if not self.fuse_lora:
            self.weight.data += self.lora_scaling * torch.matmul(
                self.lora_left_weight.t(), self.lora_right_weight.t())
        self.fuse_lora = True

    def unfuse_lora_weight(self):
        """Subtract the low-rank product from the base weight, undoing fusion.

        Idempotent: does nothing if the delta is not currently fused.
        """
        if self.fuse_lora:
            self.weight.data -= self.lora_scaling * torch.matmul(
                self.lora_left_weight.t(), self.lora_right_weight.t())
        self.fuse_lora = False

    def forward(self, input):
        if self.fuse_lora:
            # The LoRA delta is already folded into self.weight.
            return F.linear(input, self.weight, self.bias)
        else:
            # Base projection plus the scaled low-rank update:
            # W x + scaling * (dropout(x) A B).
            # Fixed: the original line was missing the attribute name
            # (`self.       ,`), which made the file unparseable.
            return F.linear(
                input, self.weight,
                self.bias) + (self.lora_dropout(input) @ self.lora_right_weight
                              @ self.lora_left_weight) * self.lora_scaling


# convert the linear layer to LoRA
def convert_linear_layer_to_lora(model,
                                 part_module_name,
                                 lora_dim=0,
                                 lora_scaling=1,
                                 lora_droppout=0):
    """Replace matching nn.Linear submodules of `model` with LoRA wrappers.

    A Linear layer is replaced when `part_module_name` occurs anywhere in its
    dotted module name. Each replacement inherits the original weight/bias and
    is moved to the original weight's device and dtype. Returns `model`.
    """
    # Collect names first: mutating the module tree while iterating
    # named_modules() would be unsafe.
    target_names = [
        name for name, module in model.named_modules()
        if isinstance(module, nn.Linear) and part_module_name in name
    ]
    for name in target_names:
        # recursive_getattr walks the dotted path (e.g. "encoder.layer.0.q").
        original = recursive_getattr(model, name)
        lora_layer = LinearLayer_LoRA(
            original.weight, lora_dim, lora_scaling, lora_droppout,
            original.bias).to(original.weight.device).to(original.weight.dtype)
        # Swap the original Linear for the LoRA-wrapped layer in place.
        recursive_setattr(model, name, lora_layer)
    return model


def _z3_params_to_fetch(param_list):
    """Return the params that need gathering under DeepSpeed ZeRO stage 3.

    A parameter qualifies when it carries the ZeRO-3 partitioning marker
    (`ds_id`) and its `ds_status` says it is not materialized locally.
    """
    not_available = (deepspeed.runtime.zero.partition_parameters.
                     ZeroParamStatus.NOT_AVAILABLE)
    fetch_needed = []
    for param in param_list:
        if hasattr(param, 'ds_id') and param.ds_status == not_available:
            fetch_needed.append(param)
    return fetch_needed


# convert the LoRA layer to linear layer
def convert_lora_to_linear_layer(model):
    """Fuse every LinearLayer_LoRA in `model` into its base weight.

    After this call each LoRA layer behaves like the plain linear layer it
    wraps (its forward takes the fused fast path). Handles DeepSpeed ZeRO
    stage 3 by gathering partitioned parameters before fusing. Returns `model`.
    """
    lora_names = [
        name for name, module in model.named_modules()
        if isinstance(module, LinearLayer_LoRA)
    ]
    for name in lora_names:
        lora_module = recursive_getattr(model, name)
        # The ds_id attribute marks a ZeRO-3 partitioned parameter.
        zero_stage_3 = hasattr(lora_module.weight, 'ds_id')
        params_to_gather = _z3_params_to_fetch([
            lora_module.weight, lora_module.bias,
            lora_module.lora_left_weight, lora_module.lora_right_weight
        ])
        # Gather the partitioned params locally (no-op unless ZeRO-3 is on),
        # then fold the LoRA delta into the base weight.
        with deepspeed.zero.GatheredParameters(params_to_gather,
                                               modifier_rank=0,
                                               enabled=zero_stage_3):
            lora_module.fuse_lora_weight()
    return model


def only_optimize_lora_parameters(model, force_optimize_params=None):
    """Freeze all parameters except the LoRA factors.

    A parameter stays trainable when its name contains "lora_right_weight" or
    "lora_left_weight", or when its full dotted name is listed in
    `force_optimize_params`; everything else gets `requires_grad = False`.

    Args:
        model: the module whose parameters are toggled in place.
        force_optimize_params: optional iterable of fully-qualified parameter
            names to keep trainable regardless of the LoRA naming rule.
            Defaults to none (replaces the original mutable `[]` default).

    Returns:
        The same `model`, for call chaining.
    """
    # Normalize to a set: avoids the mutable-default-argument pitfall and
    # gives O(1) membership tests.
    forced = set(force_optimize_params or ())
    for name, param in model.named_parameters():
        param.requires_grad = ("lora_right_weight" in name
                               or "lora_left_weight" in name
                               or name in forced)
    return model


def make_model_gradient_checkpointing_compatible(model):
    """Make gradient checkpointing work when only LoRA params require grad.

    Hugging Face models expose enable_input_require_grads() for exactly this;
    older models fall back to a forward hook on the input embeddings that
    forces their output to require grad. Returns `model`.
    """
    if hasattr(model, "enable_input_require_grads"):
        model.enable_input_require_grads()
    elif hasattr(model, "get_input_embeddings"):

        def _require_grads_hook(module, input, output):
            # Force a grad-requiring tensor at the front of the network so
            # checkpointed segments have something to backprop through.
            output.requires_grad_(True)

        embeddings = model.get_input_embeddings()
        embeddings.register_forward_hook(_require_grads_hook)
    return model
