# =============================================================================
# Copyright (c) 2025 Huawei Technologies Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import os
import torch
from utils import ARGS, print_rank_0, all_gather


kv_scaling_factor_dict = {}  # calibration: layer_id -> (key_scale, value_scale), running abs-max across batches
comm_scaling_factor_dict = {}  # calibration: layer_id -> (scale after attention, scale after moe)
kv_scaling_factor_dict_infer = None  # inference: lazily loaded once from kv_scalings.pt
comm_scaling_factor_dict_infer = None  # inference: lazily loaded once from comm_scalings.pt
# KV quant: at the end of the calibration inference, the collected KV caches yield the
# quant scales, which can then be reused across multi-turn inference.


# ################## kv static quant ################################
def check_nan_inf(tensor):
    """Return a truthy value iff *tensor* contains any NaN or +/-inf entries."""
    has_nan = torch.isnan(tensor).any()
    has_inf = torch.isinf(tensor).any()
    return has_nan or has_inf


def replace_nan_inf_with_mean(tensor):
    """In-place: overwrite NaN/inf entries with the mean of the finite entries.

    Returns the same tensor object for chaining.
    """
    invalid = torch.isnan(tensor) | torch.isinf(tensor)
    # Mean computed only over the valid (finite) values.
    fill_value = torch.mean(tensor[~invalid])
    tensor[invalid] = fill_value
    return tensor


@torch.no_grad()
def monitor_static_scaling(x, bits):
    """Compute per-(head, dim) symmetric quant scales for one cache batch.

    Pools the abs-max over every batch and sequence position, so repeated calls
    can be max-reduced across batches by the caller.

    NOTE(review): the transpose comment treats the input as [B, N, S, D] (BNSD),
    while the old docstring claimed [b, s, n, h] — the BNSD reading matches the
    cache slicing in update_scaling_factors_kvcache; confirm.

    Args:
        x: 4-D cache tensor, assumed [B, N, S, D].
        bits: signed symmetric quantization bit width (e.g. 8).

    Returns:
        Tensor of shape (num_head, head_dim): abs-max / (2**(bits-1) - 1).
    """
    if torch.isnan(x).any() or torch.isinf(x).any():
        print('layer past have nan', flush=True)
    quant_max = 2 ** (bits - 1) - 1
    permuted = x.transpose(1, 2).contiguous()                       # BNSD -> BSND
    flat = permuted.view(-1, permuted.shape[-2], permuted.shape[-1])  # (B*S, N, D)
    return flat.abs().amax(dim=0) / quant_max                       # (N, D)


# per-channel static quant: fold one batch's K/V scales into the running maxima
def update_scaling_factors_kvcache(k_cache, v_cache, context_length, layer_id):
    """Update kv_scaling_factor_dict[layer_id] with scales from the current batch.

    Only the first context_length positions of the caches are considered; the
    running entry keeps the elementwise maximum across all calibration batches.
    """
    bits = 8  # INT8 quant
    k_scale = monitor_static_scaling(k_cache[:, :, :context_length], bits)
    v_scale = monitor_static_scaling(v_cache[:, :, :context_length], bits)
    previous = kv_scaling_factor_dict.get(layer_id)
    if previous is None:
        kv_scaling_factor_dict[layer_id] = (k_scale, v_scale)
    else:
        kv_scaling_factor_dict[layer_id] = (
            torch.maximum(previous[0], k_scale),
            torch.maximum(previous[1], v_scale),
        )


def kv_collect(module, context_length, prefix=None):
    """Recursively walk *module* and record KV-cache quant scales for every
    PanguAttention layer found, keyed by the layer index in its module path.
    """
    from model import PanguAttention
    if isinstance(module, PanguAttention):
        # The layer index is the integer that immediately follows "layers." in
        # the dotted module path.
        layer_id = int(prefix.split("layers.")[1].split(".")[0])
        update_scaling_factors_kvcache(module.k_cache.data, module.v_cache.data, context_length, layer_id)
    for name, child in list(module.named_children()):
        full_name = name if prefix is None else prefix + '.' + name
        kv_collect(child, context_length, prefix=full_name)


def save_kv_scaling_factors(save_path):
    """All-gather the per-layer KV quant scales across the attention TP group
    and save the complete dict to save_path/kv_scalings.pt (rank 0 only).

    Every rank must call this: all ranks participate in the all-gather and the
    barriers.
    """
    if torch.distributed.get_rank() == 0:
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(save_path, exist_ok=True)
    torch.distributed.barrier()

    for layer_id in kv_scaling_factor_dict:
        key_scaling_factor = all_gather(kv_scaling_factor_dict[layer_id][0], ARGS.atten_tp_group, ARGS.atten_tp_size).cpu()
        value_scaling_factor = all_gather(kv_scaling_factor_dict[layer_id][1], ARGS.atten_tp_group, ARGS.atten_tp_size).cpu()
        # (removed a dead no-op statement that just built and discarded this tuple)
        kv_scaling_factor_dict[layer_id] = (key_scaling_factor, value_scaling_factor)

    if torch.distributed.get_rank() == 0:
        torch.save(kv_scaling_factor_dict, os.path.join(save_path, 'kv_scalings.pt'))
    torch.distributed.barrier()
    print_rank_0("kvcache quant calibration done.")


def load_kv_scaling_factor(module, load_dir, prefix=None):
    """Recursively load per-layer KV-cache quant scales into every PanguAttention.

    The saved scales cover all attention TP ranks; each rank takes its own chunk
    (along dim 0) according to its attention TP rank.

    Args:
        module: root torch module to walk.
        load_dir: directory containing kv_scalings.pt (as written by
            save_kv_scaling_factors).
        prefix: dotted module path accumulated during recursion (None at the root).
    """
    global kv_scaling_factor_dict_infer
    if kv_scaling_factor_dict_infer is None:
        # Load once and cache at module level; weights_only avoids arbitrary unpickling.
        load_path = os.path.join(load_dir, 'kv_scalings.pt')
        kv_scaling_factor_dict_infer = torch.load(load_path, map_location=torch.device("cpu"), weights_only=True)
    from model import PanguAttention
    if isinstance(module, PanguAttention):
        layer_id = int(prefix.split("layers.")[1].split(".")[0])  # integer after "layers."
        tp_size = ARGS.atten_tp_size
        tp_rank = ARGS.atten_tp_rank
        # Take this rank's chunk of the gathered scale (chunk splits along dim 0 —
        # presumably the kv-head dimension; confirm against save-side layout),
        # then flatten to 1-D float32.
        module.k_cache_scale = kv_scaling_factor_dict_infer[layer_id][0].chunk(tp_size)[tp_rank].npu().view(-1).to(dtype=torch.float32)
        module.v_cache_scale = kv_scaling_factor_dict_infer[layer_id][1].chunk(tp_size)[tp_rank].npu().view(-1).to(dtype=torch.float32)
        # Fused (2, kv_head_num * head_dim) fp16 tensor consumed by the attention kernel.
        module.kv_cache_scale.data = torch.stack([module.k_cache_scale, module.v_cache_scale], dim=0).to(dtype=torch.float16).contiguous()

    for child_name, child_module in list(module.named_children()):
        if prefix is not None:
            child_name = prefix + '.' + child_name
        load_kv_scaling_factor(child_module, load_dir, prefix=child_name)


# ################## comm static quant ################################
def cal_comm_static_scaling(activation, bits):
    """Return a scalar symmetric-quant scale for *activation*.

    NOTE(review): this uses activation.max(), not activation.abs().max(), so
    negative outliers do not influence the scale — confirm this is intentional
    for the comm activations (the KV path uses abs-max).

    Args:
        activation: tensor whose dynamic range is being calibrated.
        bits: signed symmetric quantization bit width (e.g. 8).
    """
    if torch.isnan(activation).any() or torch.isinf(activation).any():
        print('layer past have nan', flush=True)
    positive_range = 2 ** (bits - 1) - 1
    return activation.max() / positive_range


def comm_quant(layer_id, activation, indice):
    """Fold *activation*'s scale into the running comm scale for *layer_id*.

    indice selects the slot: 0 = after attention, 1 = after moe. The stored
    scale is the running maximum over all calibration batches.
    """
    bits = 8  # INT8 quant
    entry = comm_scaling_factor_dict.setdefault(layer_id, (None, None))
    new_scale = cal_comm_static_scaling(activation, bits).cpu().to(dtype=torch.float32)
    if entry[indice] is not None:
        new_scale = torch.maximum(entry[indice], new_scale)

    if indice == 0:
        comm_scaling_factor_dict[layer_id] = (new_scale, entry[1])
    elif indice == 1:
        comm_scaling_factor_dict[layer_id] = (entry[0], new_scale)


def save_comm_scaling_factors(save_path):
    """Save the collected comm-quant scales to save_path/comm_scalings.pt
    (rank 0 writes; all ranks must call this for the barriers).
    """
    if torch.distributed.get_rank() == 0:
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(save_path, exist_ok=True)
    torch.distributed.barrier()
    if torch.distributed.get_rank() == 0:
        torch.save(comm_scaling_factor_dict, os.path.join(save_path, 'comm_scalings.pt'))
    # Barrier after the save for consistency with save_kv_scaling_factors, so no
    # rank proceeds (e.g. to loading) before the file exists.
    torch.distributed.barrier()
    print_rank_0("comm quant calibration done.")


def load_comm_scaling_factor(module, load_dir, prefix=None):
    """Recursively load per-layer comm-quant scales into every PanguDecoderLayer.

    Args:
        module: root torch module to walk.
        load_dir: directory containing comm_scalings.pt (as written by
            save_comm_scaling_factors).
        prefix: dotted module path accumulated during recursion (None at the root).
    """
    global comm_scaling_factor_dict_infer
    if comm_scaling_factor_dict_infer is None:
        # Load once and cache at module level; weights_only avoids arbitrary unpickling.
        load_path = os.path.join(load_dir, 'comm_scalings.pt')
        comm_scaling_factor_dict_infer = torch.load(load_path, map_location=torch.device("cpu"), weights_only=True)
    from model import PanguDecoderLayer
    if isinstance(module, PanguDecoderLayer):
        layer_id = int(prefix.split("layers.")[1].split(".")[0])  # integer after "layers."
        hidden_size = module.model_args.hidden_size
        # Each saved scale is a 0-dim tensor (see cal_comm_static_scaling);
        # broadcast it to a per-channel fp32 vector of length hidden_size.
        module.comm_scale1 = comm_scaling_factor_dict_infer[layer_id][0].npu().unsqueeze(0).repeat(hidden_size).to(dtype=torch.float32)
        module.comm_scale2 = comm_scaling_factor_dict_infer[layer_id][1].npu().unsqueeze(0).repeat(hidden_size).to(dtype=torch.float32)
    for child_name, child_module in list(module.named_children()):
        if prefix is not None:
            child_name = prefix + '.' + child_name
        load_comm_scaling_factor(child_module, load_dir, prefix=child_name)
