import time
import math
import copy
import operator
import re
from functools import reduce

import numpy as np
import torch
import torch.distributed
import torch_npu
from megatron.training.global_vars import get_args

from .operator_model_cache import operator_cache
from ..utils import logger, get_model_config, get_system_config
from ..utils.utils import GlobalMemoryBuffer
from ..utils.profiler import CommProfiling


# Number of timed iterations used when averaging an operator's duration.
_ITERATION_LOOP_TIME = 10
# Attention mask built lazily by get_attention_mask() and shared by FlashAttention.
_GLOBAL_ATTN_MASK = None

def get_attention_mask():
    """Build (and cache in a module global) the attention mask for profiling.

    With flash attention and a long sequence (or ring/hybrid context
    parallelism), a compressed 2048x2048 upper-triangular mask is used and
    ``args.sparse_mode`` is set to 2; otherwise a full band mask of shape
    ``[micro_batch_size, 1, seq, seq]`` is built and ``sparse_mode`` reset
    to 0. Returns the mask tensor on the current device.
    """
    global _GLOBAL_ATTN_MASK
    args = get_args()
    device = torch.cuda.current_device()

    use_compressed_mask = args.use_flash_attn and (
        args.seq_length > 2048
        or args.context_parallel_algo in ['megatron_cp_algo', 'hybrid_cp_algo']
    )

    if use_compressed_mask:
        args.sparse_mode = 2
        _GLOBAL_ATTN_MASK = torch.triu(
            torch.ones([2048, 2048], dtype=bool, device=device), diagonal=1
        )
    else:
        args.sparse_mode = 0
        full_shape = [args.micro_batch_size, 1, args.seq_length, args.seq_length]
        # Band mask: everything strictly below the pre_tockens band OR
        # strictly above the next_tockens band is masked out.
        below_band = torch.tril(
            torch.ones(full_shape, dtype=bool, device=device),
            diagonal=-(args.pre_tockens + 1),
        )
        above_band = torch.triu(
            torch.ones(full_shape, dtype=bool, device=device),
            diagonal=args.next_tockens + 1,
        )
        _GLOBAL_ATTN_MASK = below_band + above_band
    return _GLOBAL_ATTN_MASK

def op_matmul(x):
    """Multiply the two matrices in *x*, transposing as needed to line up
    their inner dimensions.

    Pairings are tried in priority order: ``lm @ rm``, then ``lm @ rm.T``,
    then ``lm.T @ rm``; an AssertionError is raised when none matches.
    """
    left, right = x

    if left.shape[1] == right.shape[0]:
        return torch.matmul(left, right)
    if left.shape[1] == right.shape[1]:
        return torch.matmul(left, right.t())
    if left.shape[0] == right.shape[0]:
        return torch.matmul(left.t(), right)
    raise AssertionError(f"op_matmul error, lm.shape={left.shape} rm.shape={right.shape}")


def op_topk(x):
    """Return the top-k values of the first input along dim 1; k comes from
    the model config's moe_router_topk."""
    k = get_model_config().args.moe_router_topk
    values, _ = torch.topk(input=x[0], k=k, dim=1)
    return values
    

def op_nonzero(x):
    """Return the indices of the non-zero elements of the first input."""
    return torch.nonzero(input=x[0])


def op_swiglu(x):
    """Apply the fused NPU SwiGLU activation to the first input."""
    return torch_npu.npu_swiglu(x[0], dim=-1)


def op_swiglu_grad(x):
    """Run the fused NPU SwiGLU backward kernel on the first two inputs."""
    first, second = x[0], x[1]
    return torch_npu.npu_swiglu_backward(first, second, dim=-1)


class Softmax(torch.nn.Module):
    """Softmax over the last dimension, accumulated in float32."""

    def __init__(self):
        super().__init__()
        self.dim = -1

    def forward(self, x):
        return torch.softmax(x[0], dim=self.dim, dtype=torch.float32)


class LayerNorm(torch.nn.Module):
    """LayerNorm over the model's hidden size (epsilon configurable)."""

    def __init__(self, eps=1e-5):
        super().__init__()
        hidden_size = get_model_config().args.hidden_size
        self.layer_norm = torch.nn.LayerNorm(normalized_shape=hidden_size, eps=eps)

    def forward(self, x):
        return self.layer_norm(*x)
    

class FusedRmsNorm(torch.nn.Module):
    """RMSNorm using the fused torch_npu.npu_rms_norm kernel."""

    def __init__(self, eps=1e-6) -> None:
        super().__init__()
        # Move the tensor to NPU *before* wrapping it in Parameter: calling
        # .npu() on a Parameter returns a plain (non-leaf) Tensor, so the
        # weight would not be registered as a module parameter and would be
        # skipped by module.to()/parameters().
        self.weight = torch.nn.Parameter(
            torch.ones(get_model_config().args.hidden_size).npu()
        )
        self.eps = eps

    def forward(self, x):
        # npu_rms_norm returns (output, rstd); only the normalized output
        # is needed for profiling.
        return torch_npu.npu_rms_norm(x[0].requires_grad_(), self.weight, epsilon=self.eps)[0]
    

class FlashAttention(torch.nn.Module):
    """Profiling wrapper around torch_npu.npu_fusion_attention.

    forward expects ``x = (q, k, v)`` in 'SBH' layout; the head count is
    recovered from q's hidden dimension and the head_dim derived from the
    model config.
    """

    def __init__(self):
        super().__init__()
        args = get_model_config().args
        self.head_dim = args.hidden_size // args.num_attention_heads
        self.scale = 1.0 / math.sqrt(self.head_dim)
        # Causal defaults: look arbitrarily far back, never ahead.
        self.pre_tockens = 65536
        self.next_tockens = 0

        self.attention_mask = get_attention_mask()

    def forward(self, x):
        q, k, v = x
        # 'SBH' layout: the hidden dim is last; derive the head count from it.
        # (The original also unpacked the unused seq_length/batch dims.)
        head_num = q.shape[2] // self.head_dim
        output = torch_npu.npu_fusion_attention(
            q, k, v, head_num, 'SBH',
            pse=None,
            padding_mask=None,
            atten_mask=self.attention_mask,
            scale=self.scale,
            pre_tockens=self.pre_tockens,
            next_tockens=self.next_tockens,
            keep_prob=1.0,
            inner_precise=0,
            sparse_mode=get_args().sparse_mode
        )[0]
        return output
    

def help_profiler(model, input_tensors):
    """Measure forward and backward wall-clock time of *model* in microseconds.

    Returns ``(fwd_time, bwd_time)``. The backward time is computed by
    timing a combined fwd+loss+bwd loop and subtracting the separately
    measured forward and loss-reduction times, so the order of the three
    timed loops below must not change.
    """
    model.to(torch.cuda.current_device())

    # Enable gradients on all non-bool inputs so backward can run.
    # NOTE(review): integer-typed inputs would make requires_grad_() raise —
    # this assumes non-bool inputs are floating point; confirm with callers.
    for input_tensor in input_tensors:
        if input_tensor.dtype != torch.bool:
            input_tensor.requires_grad_()
    
    # Warmup; keep the last output so the loss-reduction loop below can
    # time torch.sum on a representative tensor.
    sum_z = None
    for _ in range(3):
        sum_z = model(input_tensors)

    # forward_time: average over _ITERATION_LOOP_TIME synchronized runs.
    torch.npu.synchronize()
    start_time = time.time()
    for _ in range(_ITERATION_LOOP_TIME):
        model(input_tensors)
    torch.npu.synchronize()
    fwd_time = (time.time() - start_time) * 1e6 / _ITERATION_LOOP_TIME

    # Backward warmup (a fresh graph is built and freed each iteration).
    for _ in range(3):
        z = model(input_tensors)
        loss = torch.sum(z)
        loss.backward()

    # Time the torch.sum loss reduction alone so it can be subtracted
    # from the combined loop below.
    torch.npu.synchronize()
    start_time = time.time()
    for _ in range(_ITERATION_LOOP_TIME):
        torch.sum(sum_z)
    torch.npu.synchronize()
    loss_time = (time.time() - start_time) * 1e6 / _ITERATION_LOOP_TIME

    # Combined fwd + loss + bwd loop; backward time is the remainder after
    # subtracting the forward and loss components measured above.
    torch.npu.synchronize()
    start_time = time.time()
    for _ in range(_ITERATION_LOOP_TIME):
        z = model(input_tensors)
        loss = torch.sum(z)
        loss.backward()
    torch.npu.synchronize()
    bwd_time = (time.time() - start_time) * 1e6 / _ITERATION_LOOP_TIME - fwd_time - loss_time
    return fwd_time, bwd_time


def profiler(op_type, input_shapes, data_types, num_samples):
    """Profile a single compute operator and return its duration in us.

    Plain-function entries in the dispatch table are timed directly; class
    entries (torch.nn.Modules) are timed via help_profiler, with the
    backward time reported for op types ending in 'Grad'. Returns 0.0 for
    unsupported operators or inconsistent shape/dtype lists.
    """
    # Dispatch table: a forward op and its Grad variant share one callable;
    # which measurement is returned is decided by the 'Grad' suffix below.
    _FUNC_TABLE = {
        'MatMul': op_matmul,
        'TopK': op_topk,
        'Softmax': Softmax,
        'SoftmaxGrad': Softmax,
        'NonZero': op_nonzero,
        'SwiGlu': op_swiglu,
        'SwiGluGrad': op_swiglu_grad,
        'LayerNorm': LayerNorm,
        'LayerNormGrad': LayerNorm,
        'RmsNorm': FusedRmsNorm,
        'RmsNormGrad': FusedRmsNorm,
        'FlashAttentionScore': FlashAttention,
        'FlashAttentionScoreGrad': FlashAttention
    }

    def get_output_shape(op_func, input_tensors):
        # Execute the operator once just to discover its output shape.
        if isinstance(op_func, type):
            model = op_func()
            model.to(torch.cuda.current_device())
            output = model(input_tensors)
            torch.npu.synchronize()
        else:
            output = op_func(input_tensors)
            torch.npu.synchronize()
        return output.shape
    
    def predict_and_rectify(op_type, op_time, output_shape, input_shapes):
        """Rectify the measured time of *op_type* with the Gaussian error model."""
        from .operator_model import Sampler, ARD_NUM_DIMS
        if not op_type in ARD_NUM_DIMS.keys():
            return op_time

        sys_config = get_system_config()
        # The noise sampler is created lazily and cached on the system config.
        if not hasattr(sys_config, 'noise_sampler'):
            setattr(sys_config, 'noise_sampler', Sampler(num_samples))
        op_time = sys_config.noise_sampler.run(op_type, op_time, output_shape, *input_shapes)
        return op_time

    # Normalize names of the form 'MatMulV<n>' to the base name 'MatMul'.
    match = re.match(r"^(.*?)(V\d+)$", op_type)
    if match:
        op_type = match.group(1)

    op_func = _FUNC_TABLE.get(op_type, None)
    if not op_func:
        return .0
    
    # Flash attention only consumes (q, k, v); drop any extra trace inputs.
    if op_func == FlashAttention:
        input_shapes = input_shapes[:3]
        data_types = data_types[:3]

    if len(input_shapes) != len(data_types):
        logger.warning(f"{op_type} length of input_shapes({input_shapes}) and data_types({data_types}) are not consistent")
        return .0
    
    # Materialize inputs from the shared memory buffer at the requested dtypes.
    input_tensors = []
    for i in range(len(input_shapes)):
        tensor = GlobalMemoryBuffer.get_tensor(input_shapes[i], i).to(data_types[i])
        if not tensor.is_contiguous():
            tensor = tensor.contiguous()
        input_tensors.append(tensor)

    op_time = 0
    if isinstance(op_func, type):
        # Module path: fwd and bwd measured together by help_profiler.
        model = op_func()
        fwd_time, bwd_time = help_profiler(model, input_tensors)
        op_time = bwd_time if op_type.endswith('Grad') else fwd_time
    else:
        # warmup
        for _ in range(_ITERATION_LOOP_TIME // 2):
            op_func(input_tensors)
        
        op_times = []
        for _ in range(_ITERATION_LOOP_TIME):
            torch.npu.synchronize()
            start_time = time.time()
            op_func(input_tensors)
            torch.npu.synchronize()
            op_times.append((time.time() - start_time) * 1e6)
        
        # Drop one outlier at each extreme before averaging.
        op_times.remove(max(op_times))
        op_times.remove(min(op_times))
        op_time = sum(op_times) / len(op_times)
        fwd_time = op_time
        bwd_time = .0
    
    output_shape = get_output_shape(op_func, input_tensors)

    # Cache forward entries only; a Grad op shares the forward record.
    if not op_type.endswith('Grad'):
        operator_cache.record(op_type, input_shapes, output_shape, fwd_time, bwd_time)

    if get_system_config().use_operator_model:
        output_shape = list(output_shape)
        op_time = predict_and_rectify(op_type, op_time, output_shape, input_shapes)

    return op_time


def compute_block_time(operator_list: list, num_operator=0, num_samples=100):
    """Estimate the total duration (us) of a list of trace operators.

    Starting at index *num_operator*, sums compute-operator times (via
    profiler) and hccl communication times (via CommProfiling), modelling
    groups of asynchronous hccl operators as overlapping with the compute
    operators issued after them.
    """
    sum_time = 0

    # Map hccl trace-name prefixes to CommProfiling operation names.
    Type_dict = {
        'hcom_allGather_': 'all_gather',
        'hcom_reduceScatter_': 'reduce_scatter',
        'hcom_allReduce_': 'all_reduce',
        'hcom_alltoall_': 'alltoall',
        'hcom_send_': 'isend',
        'hcom_receive_': 'irecv'
    }

    # Map trace dtype strings to torch dtypes.
    Data_Type_dict = {
        'DT_BF16': torch.bfloat16,
        'INT32': torch.int32,
        'BOOL': torch.bool,
        'FLOAT': torch.float,
        'INT64': torch.int64,
        'FLOAT16': torch.float16
    }

    def get_op_time(operator_list, index, is_comm_op):
        # Return the estimated duration (us) of the operator at *index*.
        if is_comm_op:
            shape = operator_list[index]['Input Shape'][0]
            domain = operator_list[index]['Domains']
            op = operator_list[index]['Type']
            return CommProfiling.get_comm_time(shape, domain, op)
        else:
            input_shapes = operator_list[index]['Input Shape']
            data_type_str = operator_list[index]['Input Data Types'].split(';')
            data_type = [Data_Type_dict[key] for key in data_type_str if key in Data_Type_dict]
            op = operator_list[index]['Type']
            return profiler(op, input_shapes, data_type, num_samples)
        
        
    while num_operator < len(operator_list):
        
        # Skip trace entries with no recorded input shape.
        if not operator_list[num_operator]['Input Shape']:
            logger.warning(f"{num_operator}-th operator-{operator_list[num_operator]}'shape is None")
            num_operator += 1
            continue

        op_name = operator_list[num_operator]['Type']
        if op_name.startswith('hcom_'):
            is_Async = operator_list[num_operator]['Async']
            if not is_Async:
                # Synchronous communication is counted in full.
                comm_time = get_op_time(operator_list, num_operator, True)
                sum_time += comm_time
                logger.debug(f"{num_operator}-th operator-{op_name} duration_time: {comm_time} us")
                num_operator += 1
            else:
                index = num_operator
                max_comm_time = 0
                # Case: several asynchronous hccl operators issued back-to-back;
                # only the slowest one determines the elapsed time.
                while index < len(operator_list) and operator_list[index]['Type'].startswith('hcom_'):
                    comm_time = get_op_time(operator_list, index, True)
                    max_comm_time = max(max_comm_time, comm_time)
                    index += 1
                    if not operator_list[index - 1]['Async']:
                        break

                # Trace ended inside the async group: nothing left to overlap with.
                if index >= len(operator_list):
                    sum_time += max_comm_time
                    break

                # NOTE(review): index-1 always points at the last hccl op the
                # loop above processed, so this condition looks always true and
                # the overlap modelling below unreachable; possibly meant to
                # test operator_list[index - 1]['Async'] instead — verify.
                if index - 1 >= 0 and operator_list[index - 1]['Type'].startswith('hcom_'):
                    sum_time += max_comm_time
                    num_operator = index
                    continue

                # Overlap the async communication with the following compute
                # operators until it is fully hidden.
                unoverlapped_comm_time = max_comm_time
                overlapped_comm_time = 0
                while index < len(operator_list) and unoverlapped_comm_time > 0:
                    op_time = get_op_time(operator_list, index, False)
                    bound_op_time = op_time
                    # When hccl is launched right before the compute op, a
                    # launch-bound overhead hard-coded as 1000us is added.
                    if index - 1 >= 0 and operator_list[index - 1]['Type'].startswith('hcom_'):
                        bound_op_time += 1000

                    unoverlapped_comm_time -= bound_op_time
                    overlapped_comm_time += bound_op_time
                    index += 1

                sum_time += overlapped_comm_time
                num_operator = index
        # Compute operator: counted in full.
        else:
            op_time = get_op_time(operator_list, num_operator, False)
            sum_time += op_time
            logger.debug(f"{num_operator}-th operator-{op_name} duration_time: {op_time} us")

            num_operator += 1

    return sum_time









