import os
import sys
import torch
import typing
import gc
import tqdm, math

current_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.abspath(os.path.join(current_directory, '..', ".."))
sys.path.append(parent_directory)

from msmodelslim.pytorch.llm_ptq.accelerate_adapter import PrepareWeight
from ascend_utils import ResListToRelease

from .hadamard_utils import random_hadamard_matrix, walsh_matrix

GLOBAL_DTYPE=torch.float32  # working precision for all fuse/rotate matrix products


class GraphOpt:
    """Helpers for in-place surgery on a module tree."""

    @staticmethod
    def set_module(model,
                   submodule_key,
                   module):
        """Replace the submodule addressed by the dotted path `submodule_key` with `module`."""
        *parent_names, leaf_name = submodule_key.split('.')
        target = model
        # Walk down to the immediate parent of the submodule to replace.
        for name in parent_names:
            target = getattr(target, name)
        setattr(target, leaf_name, module)

def fuse_ln_linear(layernorm: torch.nn.Module, linear_layers: typing.Iterable[torch.nn.Linear]) -> None:
    """
    Fuse a norm layer's affine parameters into the linear layers that consume
    its output, in place.

    For each linear layer with weight W and bias b:
      - W <- W * diag(layernorm.weight)
      - b <- b + W @ layernorm.bias   (only when the norm has a bias)

    Parameters:
    - layernorm (torch.nn.Module): The norm module whose parameters will be fused.
    - linear_layers (typing.Iterable[torch.nn.Linear]): A list of linear layers adjacent to the
      norm module that will be updated with the fused parameters.

    Returns:
    - None: The function modifies the linear layers in place.
    """
    for linear in linear_layers:
        linear_dtype = linear.weight.dtype
        # Work in float32 (GLOBAL_DTYPE) to limit accumulation error; cast back at the end.
        current_weight = linear.weight.data.to(dtype=GLOBAL_DTYPE)
        # Calculating new weight and bias
        if hasattr(layernorm, 'bias'):
            if linear.bias is None:
                linear.bias = torch.nn.Parameter(torch.zeros(linear.out_features, dtype=linear_dtype, device=linear.weight.device))
            # BUGFIX: fold the *norm's* bias through the linear weight. The previous
            # code read linear.bias here, which is shape-incompatible with
            # current_weight's input dimension (out_features vs in_features) and
            # would double-count the linear's own bias.
            ln_bias = layernorm.bias.data.to(dtype=GLOBAL_DTYPE)
            fused_bias = linear.bias.data.to(dtype=GLOBAL_DTYPE) + torch.matmul(current_weight, ln_bias)
            # BUGFIX: Tensor.to() is out-of-place; the converted result must be
            # assigned back (the old `linear.bias.data.to(linear_dtype)` was a no-op).
            linear.bias.data = fused_bias.to(linear_dtype)

        linear.weight.data = (current_weight * layernorm.weight.to(dtype=GLOBAL_DTYPE)).to(linear_dtype)
        del current_weight
    return

def fuse_layer_norms_head(model):
    """Fuse the final pre-head norm into the LM head, then reset the norm's weight to ones."""
    m_norm = get_pre_head_layernorm(model=model)
    m_linear = [get_lm_head(model=model)]
    # NOTE(review): m_linear is a *list*, and PrepareWeight is handed the list
    # itself here, unlike elsewhere in this file where it wraps individual
    # modules — confirm PrepareWeight accepts a list, otherwise this should be m_linear[0].
    prepare_list = [PrepareWeight(m_norm, post_force=True, post_recurse=False)]
    prepare_list += [PrepareWeight(m_linear, post_force=True, post_recurse=False)]
    with ResListToRelease(*prepare_list):
        fuse_ln_linear(m_norm, m_linear)
        # After fusing, the norm scale lives in the head; make the norm unit-scale.
        shape, dtype, device = m_norm.weight.data.shape, m_norm.weight.data.dtype, m_norm.weight.data.device
        model.model.norm.weight.data = torch.ones(shape).to(device=device, dtype=dtype)
    return

def fuse_layer_norms_module(module, config_hidden_size, fuse_kv_ln=False, idx=None, model=None):
    """
    Fuse the input layernorms of one transformer layer into the adjacent
    linear layers, then reset the fused norm weights to ones.

    Parameters:
    - module: the transformer layer to process (modified in place).
    - config_hidden_size: model hidden size (currently unused in the body).
    - fuse_kv_ln: also fuse kv_a_layernorm into kv_b_proj when True.
    - idx, model: unused here; kept for interface compatibility with callers.

    Returns:
    - None
    """
    # Materialize the attention projections and norms touched below.
    # NOTE(review): this list assumes the MLA layout (q_a_proj/q_b_proj/...);
    # a layer exposing only plain q_proj would fail here — confirm against callers.
    Ws = [module.self_attn.q_a_proj, module.self_attn.kv_a_proj_with_mqa, module.self_attn.kv_b_proj, module.self_attn.q_b_proj]
    Ws += [module.input_layernorm, module.self_attn.q_a_layernorm]
    prepare_list = [PrepareWeight(ws, post_force=True) for ws in Ws]
    with ResListToRelease(*prepare_list):
        if hasattr(module.self_attn, "q_proj"):
            fuse_ln_linear(module.input_layernorm, [module.self_attn.q_proj, module.self_attn.kv_a_proj_with_mqa])
        else:
            fuse_ln_linear(module.input_layernorm, [module.self_attn.q_a_proj, module.self_attn.kv_a_proj_with_mqa])
            fuse_ln_linear(module.self_attn.q_a_layernorm, [module.self_attn.q_b_proj])

        if fuse_kv_ln:
            fuse_ln_linear(module.self_attn.kv_a_layernorm, [module.self_attn.kv_b_proj])
            ln_weight = module.self_attn.kv_a_layernorm.weight.data
            shape, dtype, device = ln_weight.shape, ln_weight.dtype, ln_weight.device
            # The scale now lives in kv_b_proj; reset the norm to unit scale.
            module.self_attn.kv_a_layernorm.weight.data = torch.ones(shape).to(device=device, dtype=dtype)

        # Reset the fused norms to unit scale.
        m_norm = module.self_attn.q_a_layernorm
        shape, dtype, device = m_norm.weight.data.shape, m_norm.weight.data.dtype, m_norm.weight.data.device
        module.self_attn.q_a_layernorm.weight.data = torch.ones(shape).to(device=device, dtype=dtype)

        m_norm = module.input_layernorm
        shape, dtype, device = m_norm.weight.data.shape, m_norm.weight.data.dtype, m_norm.weight.data.device
        module.input_layernorm.weight.data = torch.ones(shape).to(device=device, dtype=dtype)

    # MLP side: collect every projection fed by post_attention_layernorm
    # (all experts, the shared experts and the router gate for MoE layers).
    if hasattr(module.mlp, "experts"):
        Ws = [module.mlp.experts[i].up_proj for i in range(len(module.mlp.experts))]
        Ws += [module.mlp.experts[i].gate_proj for i in range(len(module.mlp.experts))]
        Ws += [module.mlp.shared_experts.up_proj, module.mlp.shared_experts.gate_proj, module.mlp.gate]
    else:
        Ws = [module.mlp.up_proj, module.mlp.gate_proj]
    Ws += [module.post_attention_layernorm]
    prepare_list = [PrepareWeight(ws, post_force=True) for ws in Ws]

    with ResListToRelease(*prepare_list):
        if hasattr(module.mlp, "experts"):
            for i in range(len(module.mlp.experts)):
                fuse_ln_linear(module.post_attention_layernorm, [module.mlp.experts[i].up_proj, module.mlp.experts[i].gate_proj])
            fuse_ln_linear(module.post_attention_layernorm, [module.mlp.shared_experts.up_proj, module.mlp.shared_experts.gate_proj, module.mlp.gate])
        else:
            fuse_ln_linear(module.post_attention_layernorm, [module.mlp.up_proj, module.mlp.gate_proj])  

        m_norm = module.post_attention_layernorm
        shape, dtype, device = m_norm.weight.data.shape, m_norm.weight.data.dtype, m_norm.weight.data.device
        module.post_attention_layernorm.weight.data = torch.ones(shape).to(device=device, dtype=dtype)

    return

def fuse_layer_norms(model, fuse_kv_ln=False):
    """
    Fuse every layernorm in the model into its adjacent linear layers, in place.

    Parameters:
    - model: the model to process.
    - fuse_kv_ln: forwarded to fuse_layer_norms_module (also fuse kv_a_layernorm).

    Returns:
    - None
    """
    layers = get_transformer_layers(model=model)
    # Fuse the linear operations in Layernorm into the adjacent linear blocks.
    # FIX: wrap `layers` (not `enumerate(layers)`) so tqdm can read len() and
    # display a proper total/ETA; wrapping the enumerate iterator hides it.
    for i, layer in enumerate(tqdm.tqdm(layers, desc="Fusing layernorms")):
        fuse_layer_norms_module(layer, model.config.hidden_size, fuse_kv_ln, idx=i, model=model)

    fuse_layer_norms_head(model)
    return


def rotate_embeddings(model, Q: torch.Tensor) -> None:
    """
    Right-multiply every embedding weight matrix by the rotation Q, in place.

    Parameters:
    - model: the model whose embedding tables are rotated.
    - Q (torch.Tensor): rotation matrix applied on the hidden dimension.

    Returns:
    - None
    """
    for emb in get_embeddings(model):
        orig_dtype = emb.weight.data.dtype
        dev = emb.weight.device
        # Do the matmul in float32 for accuracy, then restore the original dtype.
        rotated = torch.matmul(
            emb.weight.data.to(device=dev, dtype=GLOBAL_DTYPE),
            Q.to(dtype=GLOBAL_DTYPE),
        )
        emb.weight.data = rotated.to(dtype=orig_dtype)
        del rotated
    return

def rotate_attention_inputs(layer, Q, 
                            q_b_proj_Q=None, 
                            kv_b_proj_Q=None) -> None:
    """
    Rotate the W q_proj, W q_a_proj, W q_b_proj and W kv_a_proj_with_mqa matrices
    of the self-attention layer, in place.

    Parameters:
    - layer: transformer layer whose attention input projections are rotated.
    - Q: hidden-dimension rotation applied on the input side of the projections.
    - q_b_proj_Q: optional rotation on the q_lora_rank dimension (MLA models).
    - kv_b_proj_Q: optional rotation on the kv_lora_rank dimension (MLA models).

    Returns:
    - None
    """
    if hasattr(layer.self_attn, "q_proj"):
        Ws = [layer.self_attn.q_proj, layer.self_attn.kv_a_proj_with_mqa]
    else:
        Ws = [layer.self_attn.q_a_proj, layer.self_attn.kv_a_proj_with_mqa]

    prepare_list = [PrepareWeight(ws, post_force=True) for ws in Ws]
    with ResListToRelease(*prepare_list):
        for W in Ws:
            dtype = W.weight.dtype
            device = W.weight.device

            W_ = W.weight.to(device=device, dtype=GLOBAL_DTYPE)
            W.weight.data = torch.matmul(W_, Q).to(device=device, dtype=dtype)

            del W_

    if hasattr(layer.self_attn, "q_b_proj"):
        Ws = [layer.self_attn.q_a_proj, layer.self_attn.q_b_proj]
        prepare_list = [PrepareWeight(ws, post_force=True) for ws in Ws]
        with ResListToRelease(*prepare_list):
            # q_b_proj consumes q_a_proj's output: rotate q_b_proj's input
            # dimension by q_b_proj_Q and q_a_proj's output dimension by its
            # transpose so the composition is unchanged.
            W = layer.self_attn.q_b_proj
            # BUGFIX: capture dtype/device per module instead of reusing the
            # values leaked from the loop above (those belonged to
            # kv_a_proj_with_mqa and need not match q_b_proj).
            dtype, device = W.weight.dtype, W.weight.device
            W_ = W.weight.to(device=device, dtype=GLOBAL_DTYPE)
            W.weight.data = torch.matmul(W_, q_b_proj_Q).to(device=device, dtype=dtype)

            W = layer.self_attn.q_a_proj
            dtype, device = W.weight.dtype, W.weight.device
            W_ = W.weight.to(device=device, dtype=GLOBAL_DTYPE)
            W.weight.data = torch.matmul(q_b_proj_Q.T, W_).to(device=device, dtype=dtype)
            del W_

    if hasattr(layer.self_attn, "kv_b_proj") and kv_b_proj_Q is not None:
        Ws = [layer.self_attn.kv_b_proj, layer.self_attn.kv_a_proj_with_mqa]
        prepare_list = [PrepareWeight(ws, post_force=True) for ws in Ws]
        with ResListToRelease(*prepare_list):
            W = layer.self_attn.kv_a_proj_with_mqa
            dtype, device = W.weight.dtype, W.weight.device

            # kv_a_proj_with_mqa's output is [compressed_kv ; k_rope]; only the
            # compressed part is rotated, the rope slice keeps an identity block.
            Q_blocks = (kv_b_proj_Q, torch.diag(torch.ones(layer.self_attn.qk_rope_head_dim, device=device)))
            kv_b_proj_Q_reshaped = torch.block_diag(*Q_blocks)

            W_ = W.weight.to(device=device, dtype=GLOBAL_DTYPE)
            W.weight.data = torch.matmul(kv_b_proj_Q_reshaped.T, W_).to(device=device, dtype=dtype)
            del W_

            W = layer.self_attn.kv_b_proj
            dtype, device = W.weight.dtype, W.weight.device
            W_ = W.weight.to(device=device, dtype=GLOBAL_DTYPE)
            W.weight.data = torch.matmul(W_, kv_b_proj_Q).to(device=device, dtype=dtype)

            del W_
    return

def rotate_attention_output(layer, Q) -> None:
    """Left-multiply o_proj's weight (and bias, if any) by Q^T, rotating the attention output."""
    with PrepareWeight(layer.self_attn.o_proj, post_force=True):
        o_proj = layer.self_attn.o_proj
        orig_dtype = o_proj.weight.data.dtype
        dev = o_proj.weight.device

        # Compute in float32, restore the original dtype afterwards.
        w32 = o_proj.weight.data.to(device=dev, dtype=GLOBAL_DTYPE)
        o_proj.weight.data = torch.matmul(Q.T, w32).to(device=dev, dtype=orig_dtype)

        if o_proj.bias is not None:
            b32 = o_proj.bias.data.to(device=dev, dtype=GLOBAL_DTYPE)
            o_proj.bias.data = torch.matmul(Q.T, b32).to(device=dev, dtype=orig_dtype)

        del w32
        return

def rotate_oproj_input(layer, Q) -> None:
    """
    Rotate o_proj's input dimension per attention head.

    o_proj maps num_heads * v_head_dim -> hidden_size, so the per-head
    rotation Q (v_head_dim x v_head_dim) is tiled into a block-diagonal
    matrix before being applied on the right.
    """
    with PrepareWeight(layer.self_attn.o_proj, post_force=True):
        o_proj = layer.self_attn.o_proj
        orig_dtype = o_proj.weight.data.dtype
        dev = o_proj.weight.device

        # One copy of Q per head along the diagonal.
        per_head_Q = torch.block_diag(*([Q] * layer.self_attn.num_heads))

        w32 = o_proj.weight.to(device=dev, dtype=GLOBAL_DTYPE)
        o_proj.weight.data = torch.matmul(w32, per_head_Q).to(device=dev, dtype=orig_dtype)

        del w32
        return

def rotate_uv_output(layer, Q) -> None:
    """
    Rotate the value slice of kv_b_proj's output, per head.

    For each head kv_b_proj emits [k_nope (qk_nope_head_dim) ; v (v_head_dim)].
    Only the value slice is rotated, so the per-head transform is
    blockdiag(I_qk_nope, Q), tiled over all heads and applied on the left.
    """
    with PrepareWeight(layer.self_attn.kv_b_proj, post_force=True):
        kv_b = layer.self_attn.kv_b_proj

        orig_dtype = kv_b.weight.data.dtype
        dev = kv_b.weight.device

        # Per-head transform: identity on the k_nope slice, Q on the value slice.
        nope_dim = layer.self_attn.qk_nope_head_dim
        head_Q = torch.block_diag(torch.eye(nope_dim, device=dev), Q)
        # Tile over all heads.
        full_Q = torch.block_diag(*([head_Q] * layer.self_attn.num_heads))

        w32 = kv_b.weight.data.to(device=dev, dtype=GLOBAL_DTYPE)
        kv_b.weight.data = torch.matmul(full_Q.T, w32).to(device=dev, dtype=orig_dtype)

        if kv_b.bias is not None:
            b32 = kv_b.bias.data.to(device=dev, dtype=GLOBAL_DTYPE)
            kv_b.bias.data = torch.matmul(full_Q.T, b32).to(device=dev, dtype=orig_dtype)

        del w32
        return

def rotate_mlp_input(layer, Q):
    """
    Right-multiply the hidden-dimension input weights of the MLP by Q, in place.

    Covers the up/gate projections; for MoE layers also every expert, the
    shared experts and the router gate.

    Parameters:
    - layer: the transformer layer containing the MLP.
    - Q: rotation matrix on the hidden dimension.

    Returns:
    - None
    """
    if hasattr(layer.mlp, "experts"):
        mlp_inputs = []
        for expert in layer.mlp.experts:
            mlp_inputs.extend([expert.up_proj, expert.gate_proj])
        mlp_inputs.extend([layer.mlp.shared_experts.up_proj,
                           layer.mlp.shared_experts.gate_proj,
                           layer.mlp.gate])
    else:
        mlp_inputs = [layer.mlp.up_proj, layer.mlp.gate_proj]

    prepare_list = [PrepareWeight(m, post_force=True) for m in mlp_inputs]
    with ResListToRelease(*prepare_list):
        for proj in mlp_inputs:
            orig_dtype = proj.weight.dtype
            dev = proj.weight.device
            w32 = proj.weight.data.to(device=dev, dtype=GLOBAL_DTYPE)

            proj.weight.data = torch.matmul(w32, Q).to(device=dev, dtype=orig_dtype)
            del w32
        return

def rotate_mlp_output(layer, Q):
    """
    Left-multiply the MLP down-projection weights (and biases) by Q^T, in place.
    Handles both dense MLPs and MoE layers (all experts plus shared experts).
    """
    float_dtype = torch.float
    if hasattr(layer.mlp, "experts"):
        down_projs = [expert.down_proj for expert in layer.mlp.experts]
        down_projs.append(layer.mlp.shared_experts.down_proj)
    else:
        down_projs = [layer.mlp.down_proj]

    prepare_list = [PrepareWeight(m, post_force=True) for m in down_projs]
    with ResListToRelease(*prepare_list):
        for proj in down_projs:
            orig_dtype = proj.weight.data.dtype
            dev = proj.weight.device

            w32 = proj.weight.data.to(device=dev, dtype=GLOBAL_DTYPE)
            proj.weight.data = torch.matmul(Q.T, w32).to(dtype=orig_dtype)

            if proj.bias is not None:
                b32 = proj.bias.data.to(device=dev, dtype=GLOBAL_DTYPE)
                proj.bias.data = torch.matmul(Q.T, b32.to(float_dtype)).to(dtype=orig_dtype)

            del w32
        return

def rotate_head(model, Q: torch.Tensor) -> None:
    """Right-multiply the LM head weight by the rotation Q, in place."""
    head = get_lm_head(model)
    orig_dtype = head.weight.data.dtype
    dev = head.weight.device

    # Compute in float32, then restore the head's original dtype.
    w32 = head.weight.data.to(device=dev, dtype=GLOBAL_DTYPE)
    head.weight.data = torch.matmul(w32, Q.to(dtype=w32.dtype)).to(dtype=orig_dtype)
    del w32
    return


def random_hadamard_matrix_block(in_channel, stride, eye_step, device):
    """Build an in_channel x in_channel block-diagonal matrix whose diagonal
    blocks are copies of a stride x stride random Hadamard matrix.

    Blocks whose index appears in `eye_step` are replaced with the identity.
    Assumes in_channel is a multiple of stride (any remainder rows stay zero).
    """
    # stride-dimensional random Hadamard matrix, shared by all non-identity blocks.
    H_stride = random_hadamard_matrix(stride, device)
    
    # Zero-initialized result, created directly on the target device.
    H_in_channel = torch.zeros((in_channel, in_channel), device=device, dtype=torch.float32)  ##
    
    # Place copies of the Hadamard block along the diagonal.
    num_blocks = in_channel // stride
    for i in range(num_blocks):
        start_idx = i * stride
        end_idx = start_idx + stride
        if i in eye_step:
            # Selected blocks are left un-rotated (identity).
            print(f"EYE ADD in BRB Matrix at idx {i}")
            H_in_channel[start_idx:end_idx, start_idx:end_idx] = torch.eye(stride).to(device=device, dtype=torch.float32)  ##
        else:
            H_in_channel[start_idx:end_idx, start_idx:end_idx] = H_stride
    
    return H_in_channel

def create_Q(size, group_size, mode, rot_step=1, eye_step=(-1,), device="cuda"):
    """
    Build a `size` x `size` rotation matrix.

    Parameters:
    - size: full dimension of the rotation.
    - group_size: if != -1, build a group_size rotation and tile it block-diagonally.
    - mode: one of 'walsh', 'hadamard', 'block_hadamard', 'block_hadamard_shifted'.
    - rot_step: number of rotate/permute rounds for 'block_hadamard_shifted' (1-3).
    - eye_step: block indices replaced by identity in the block-Hadamard modes.
      (Default changed from a mutable list [-1] to the tuple (-1,); membership
      semantics are identical.)
    - device: device on which the matrix is created.

    Returns:
    - torch.Tensor: the rotation matrix Q.

    Raises:
    - ValueError: on an unknown mode or an unsupported rot_step.
    """
    shift = 16  # column shift used by the permutation in the 'shifted' mode
    if group_size == -1:
        transformation_dim = size
    else:
        transformation_dim = group_size

    if mode == "walsh":
        Q = walsh_matrix(transformation_dim, GLOBAL_DTYPE, device)/math.sqrt(transformation_dim)
    elif mode == "hadamard":
        Q = random_hadamard_matrix(transformation_dim, device)
    elif mode == 'block_hadamard':
        Q = random_hadamard_matrix_block(size, 32, eye_step, device)
    elif mode == 'block_hadamard_shifted':
        R = random_hadamard_matrix_block(size, 32, eye_step, device)
        I = torch.eye(size, dtype=torch.float32)
        # P is a cyclic column shift by `shift` positions.
        P = torch.cat((I[:, -shift:], I[:, :-shift]), dim=1).to(R.device)
        if rot_step == 1:
            Q = R @ P @ R
        elif rot_step == 2:
            Q = R @ P @ R @ P.T @ R.T
        elif rot_step == 3:
            Q = R @ P @ R @ P.T @ R.T @ P @ R @ P.T @ R.T
        else:
            raise ValueError("rot_step must be 1, 2, or 3")
    else:
        # BUGFIX: an unrecognized mode previously left Q unbound, surfacing
        # later as a confusing NameError; fail fast instead.
        raise ValueError(f"Unknown rotation mode {mode!r}")

    if group_size != -1:
        # Tile the group rotation along the diagonal to cover the full size.
        Q = Q.repeat(size // group_size, 1, 1)
        Q = torch.block_diag(*Q)

    return Q


@torch.no_grad()
def rotate_emb_head(model, Q):
    """Apply the hidden-dimension rotation Q to both the embeddings and the LM head."""
    rotate_embeddings(model, Q)
    rotate_head(model, Q)
    return


@torch.no_grad()
def rotate_module(module, Q, Q_att_uv, q_b_proj_Q, kv_b_proj_Q):
    """Apply the full set of rotations to one transformer layer, in place.

    Q rotates the hidden dimension on the input side (attention/MLP inputs)
    and its transpose on the output side; Q_att_uv rotates the per-head
    value/o_proj interface; q_b_proj_Q and kv_b_proj_Q rotate the MLA
    low-rank dimensions.
    """

    # TODO support multiblock rotation
    rotate_attention_inputs(module, Q, 
                                q_b_proj_Q=q_b_proj_Q, 
                                kv_b_proj_Q=kv_b_proj_Q)
        
    # Per-head rotation of the value -> o_proj interface (R2).
    rotate_uv_output(module, Q_att_uv)
    rotate_oproj_input(module, Q_att_uv)
    
    # Hidden-dimension rotation (R1) on the remaining projections.
    rotate_attention_output(module, Q)
    rotate_mlp_input(module, Q)
    rotate_mlp_output(module, Q)
    
    # Release temporaries between layers to keep peak device memory down.
    del Q
    gc.collect()
    torch.cuda.empty_cache()
    return

@torch.no_grad()
def create_rotation_matrix(model, layers, group_size, walsh, rotate_kv, device, args):
    """
    Create the R2-family rotation matrices.

    Returns:
    - (Q_att_uv, q_b_proj_Q, kv_b_proj_Q): the per-head value/o_proj rotation
      and, for MLA models, the optional q/kv low-rank rotations (None when
      the layer has no such projection or rotate_kv is False).
    """
    # Per-head rotation for the value -> o_proj interface.
    Q_att_uv = create_Q(model.config.v_head_dim, group_size=-1, mode=args.r2_mode, device=device)

    first_attn = layers[0].self_attn

    q_b_proj_Q = None
    if hasattr(first_attn, "q_b_proj"):
        q_b_proj_Q = create_Q(first_attn.q_lora_rank, group_size=group_size,
                              mode=args.r2_mode, rot_step=args.r2_step,
                              eye_step=args.eye_step, device=device)

    kv_b_proj_Q = None
    if rotate_kv and hasattr(first_attn, "kv_b_proj"):
        kv_b_proj_Q = create_Q(first_attn.kv_lora_rank, group_size=group_size,
                               mode=args.r2_mode, rot_step=args.r2_step,
                               eye_step=args.eye_step, device=device)

    return Q_att_uv, q_b_proj_Q, kv_b_proj_Q

@torch.no_grad()
def rotate_model(model, group_size=-1, walsh=False, rotate_kv=False, args=None):
    """Rotate the whole model in place: embeddings/head with R1, then each layer."""
    device = get_lm_head(model).weight.device

    # R1: global hidden-dimension rotation, applied to embeddings and head.
    Q = create_Q(model.config.hidden_size, group_size=group_size, mode=args.r1_mode,
                 rot_step=args.r1_step, eye_step=args.eye_step, device=device)
    rotate_emb_head(model, Q)

    layers = get_transformer_layers(model)
    # R2: per-head / low-rank rotations shared across all layers.
    Q_att_uv, q_b_proj_Q, kv_b_proj_Q = create_rotation_matrix(
        model, layers, group_size, walsh, rotate_kv, device, args)

    for layer in tqdm.tqdm(layers, unit="layer", desc="Rotating"):
        with PrepareWeight(layer, True, True):
            rotate_module(layer, Q, Q_att_uv, q_b_proj_Q, kv_b_proj_Q)
    return


import transformers
# NOTE(review): PEP 8 puts imports at the top of the file; left in place here
# to keep this a documentation-only change.
# Model classes used by the get_* helpers below to dispatch on model type.
OPT_MODEL = transformers.models.opt.modeling_opt.OPTForCausalLM
OPT_LAYER = transformers.models.opt.modeling_opt.OPTDecoderLayer
LLAMA_MODEL = transformers.models.llama.modeling_llama.LlamaForCausalLM
LLAMA_LAYER = transformers.models.llama.modeling_llama.LlamaDecoderLayer

def get_lm_head(model, model_type=None):
    """
    Return the model's LM head module.

    Parameters:
    - model: a causal-LM model exposing `lm_head`.
    - model_type: kept for interface compatibility; every supported model
      exposes the head as `model.lm_head`, so it is not consulted.

    Returns:
    - The `lm_head` module.
    """
    # All branches of the original dispatch returned model.lm_head, and the
    # trailing `raise ValueError` sat after a `return` (unreachable dead code);
    # collapsed to a single return.
    return model.lm_head


def get_transformer_layers(model):
    """Return the model's decoder layers as a plain list."""
    return list(model.model.layers)


def get_embeddings(model, model_type=None) -> list[torch.nn.Module]:
    """
    Return the embedding modules of the model.

    Parameters:
    - model: the model to inspect.
    - model_type: optional model class for dispatch; OPT models expose both
      token and position embeddings, everything else (LLaMA-style and the
      default) just token embeddings.

    Returns:
    - list[torch.nn.Module]: the embedding modules to rotate.
    """
    # Check None first so the common default path never touches the
    # transformers model classes.
    if model_type is not None and model_type == OPT_MODEL:
        return [model.model.decoder.embed_tokens, model.model.decoder.embed_positions]
    # The LLAMA branch and the default branch of the original were identical;
    # its trailing `raise ValueError` after a `return` was unreachable dead
    # code and has been removed.
    return [model.model.embed_tokens]


def get_pre_head_layernorm(model, model_type=None):
    """
    Return the final (pre-LM-head) normalization layer of the model.

    Parameters:
    - model: the model to inspect.
    - model_type: optional model class for dispatch; OPT models keep the final
      norm at model.model.decoder.final_layer_norm, everything else (LLaMA-style
      and the default) at model.model.norm.

    Returns:
    - The final normalization module.
    """
    # Check None first so the common default path never touches the
    # transformers model classes.
    if model_type is not None and model_type == OPT_MODEL:
        pre_head_layernorm = model.model.decoder.final_layer_norm
    else:
        # The original LLAMA branch and default branch both read model.model.norm;
        # they are merged here, and the not-None check is applied uniformly
        # (the LLAMA branch previously — inconsistently — skipped it).
        pre_head_layernorm = model.model.norm
    assert pre_head_layernorm is not None
    return pre_head_layernorm


class RMSN(torch.nn.Module):
    """
    Root Mean Square normalization (RMSN) without a learnable scale.

    Mirrors the LLAMARMSNorm implementation:
    https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py#L75
    but with the weight fixed at 1 (the scale is expected to have been fused
    into neighbouring linear layers).
    """

    def __init__(self, mean_dim: int, eps=1e-5):
        super().__init__()
        self.eps = eps  # numerical-stability epsilon added to the variance
        self.mean_dim = mean_dim  # divisor for the squared-activation mean

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize x by its root-mean-square over the last dimension."""
        input_dtype = x.dtype
        # float16 can under/overflow when squaring; compute in float32 instead.
        if x.dtype == torch.float16:
            x = x.to(torch.float32)
        mean_sq = x.pow(2).sum(-1, keepdim=True) / self.mean_dim
        normalized = x * torch.rsqrt(mean_sq + self.eps)
        return normalized.to(input_dtype)
