from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
import sys
from typing import Dict, Type
import torch
from ..base import Quantizer,QuantModule
from ..quantizer import LSQPlusQuantizer


def time_counter(func):
    """Decorator that prints the wall-clock time each call to *func* takes.

    Fix: the original wrapper did not use ``functools.wraps``, so every
    decorated function lost its ``__name__``/``__qualname__``/docstring.
    """
    import time
    from functools import wraps

    @wraps(func)
    def inner(*args, **kwargs):
        s = time.perf_counter()
        res = func(*args, **kwargs)
        print(f"func {func} cost {time.perf_counter() - s} s")
        return res

    return inner


def get_save_rules():
    """Build the {module class -> {local/attr name -> filename}} dump rules.

    Each inner mapping tells the tracing saver which local variable (or
    module attribute) of a module's forward to persist, and under which
    filename inside that module's ``save_path`` directory.

    Imports are local to avoid circular imports at module load time.
    """
    from ..int_linear import QuantLinear
    from ..int_matmul import QuantMatMul
    from ..rope import QuantApplyRotaryPosEmb
    from ..rmsnorm import QuantRMSNorm
    from ..softmax import QuantSoftmax
    from ..silu_ewm import QuantSigmoid, QuantHadamardProduct
    from ..layernorm import QuantLayerNorm
    from ..gelu import QuantGELU
    from ..models.modeling_vit import LayerScale

    rules = {
        LSQPlusQuantizer: {
            "scale": "scale.pth",
            "zero_point": "zero_point.pth",
            "x": "input.pth",
            "x_dequant": "x_dequant.pth",
            "x_int": "x_int_add_zp_clamp.pth",
        },
        # Fix: this was a set literal {"scale_factor", "scale_factor.pth"},
        # which has no .items() and crashed the tracer for LayerScale.
        LayerScale: {"scale_factor": "scale_factor.pth"},
        QuantLinear: {
            "weight": "weight.pth",
            "bias": "bias.pth",
            "out": "fp_out.pth",
        },
        QuantMatMul: {"out": "fp_out.pth"},
        QuantApplyRotaryPosEmb: {"out": "fp_out.pth"},
        QuantRMSNorm: {
            "scale": "scale.pth",
            "weight": "weight.pth",
            "var_int": "var_int.pth",
            "std_int": "std_int.pth",
            "x_int": "x_int.pth",
            "y_int": "y_int.pth",
            "w_div_2M": "w_div_2M.pth",
            "output": "output.pth",
        },
        QuantLayerNorm: {
            "scale": "scale.pth",
            "weight": "weight.pth",
            "bias": "bias.pth",
            "mean_int": "mean_int.pth",
            "var_int": "var_int.pth",
            "std_int": "std_int.pth",
            # Fix: previously mapped to "std_int.pth", silently overwriting
            # the std_int dump above.
            "x_int_sub_mean": "x_int_sub_mean.pth",
            "x_int": "x_int.pth",
            "y_int": "y_int.pth",
            "w_div_2M": "w_div_2M.pth",
            "output": "output.pth",
        },
        QuantSoftmax: {
            "scale_s": "scale.pth",
            "t1_s": "t1.pth",
            "z_s": "z.pth",
            "p_s": "p.pth",
            "v_s": "v.pth",
            "t2_s": "t2.pth",
            "t3_s": "t3.pth",
            "v_sum_s": "v_sum.pth",
            "v_t_i_al": "v_t_i_al.pth",
            "v_i_mask": "v_i_mask.pth",
            "x_int_s": "x_int.pth",
            "x_int_max_s": "x_int_max.pth",
            "x_int_sub_max_s": "x_int_sub_max.pth",
            "v": "result.pth",
        },
        QuantSigmoid: {
            "scale": "scale.pth",
            "x_int_s": "x_int.pth",
            "x_int_max_s": "x_int_max.pth",
            "x_int_sub_max_s": "x_int_sub_max.pth",
            "t1_i": "t1_i.pth",
            "t2_i": "t2_i.pth",
            "t3_i": "t3_i.pth",
            "l_t_i_tr": "l_tr.pth",
            "l_t_i": "l.pth",
            "z": "z.pth",
            "z0": "z0.pth",
            "p_i": "p.pth",
            "p_0_i": "p0.pth",
            "v_t_i_tr": "v_tr.pth",
            "v_t_i": "v.pth",
            "v_s_i_tr": "v_s.pth",
            "v_i_mask": "v_i_mask.pth",
            "l_i_mask": "l_i_mask.pth",
            "y_f": "y_f.pth",
            "y_i": "y_i.pth",
        },
        QuantHadamardProduct: {"output": "fp_out.pth"},
        QuantGELU: {
            "scale": "scale.pth",
            "x_int": "x_int.pth",
            "alpha_x_q": "alpha_x_q.pth",
            # "fused_s": "fused_s.pth",
            "alpha": "alpha.pth",
            "sigma": "sigma.pth",
            "sigma_q": "sigma_q.pth",
            "out": "output.pth",
            # "mask": "mask.pth",
            "gelu_out": "gelu_out.pth",
            # "relu_out": "relu_out.pth",
        },
    }
    return rules



@time_counter
@torch.inference_mode()
def save_locals_during_forward(
    model: torch.nn.Module,
    dtype: torch.dtype,
    input_data: Dict[str, torch.Tensor],
    save_rules: Dict[Type[torch.nn.Module], Dict[str, str]],
):
    """
    Run the model once and save selected local variables of target
    submodules, using a sys.settrace line-level hook to peek at each
    forward frame's locals.

    Args:
        model: the PyTorch model to run.
        dtype: tensors are cast to this dtype before saving (note: this
            also applies to integer tensors such as ``x_int`` — presumably
            intentional for downstream tooling; verify if exactness matters).
        input_data: keyword inputs for ``model(**input_data)``.
        save_rules: mapping of module class -> {variable name: filename},
            e.g.::

                {
                    nn.Conv2d: {"feature_maps": "conv_features.pt"},
                    nn.Linear: {"weights": "linear_weights.pt"},
                }

            Only modules whose ``save_path`` attribute is set (see
            ``save_int_model``) are dumped; files go to
            ``module.save_path / filename``.
    """
    # The target submodule currently being traced, and its rule dict.
    current_module = None
    current_rules = None

    # Global trace callback, invoked by CPython for every frame event.
    @torch.inference_mode()
    def trace_locals(frame, event, arg):
        nonlocal current_module, current_rules

        # Only react to 'line' events inside functions whose name contains
        # "forward" or "_integer"; everything else keeps tracing untouched.
        if event != "line" or (
            "_integer" not in frame.f_code.co_name
            and "forward" not in frame.f_code.co_name
        ):
            return trace_locals

        # Detect entry into a target submodule's forward: the frame's
        # ``self`` must be a rule class AND have a non-None save_path.
        module = frame.f_locals.get("self")
        if (
            isinstance(module, tuple(save_rules.keys()))
            and hasattr(module, "save_path")
            and module.save_path is not None
        ):
            current_module = module
            current_rules = save_rules[type(module)]
        elif module != current_module:
            # Left the target module's frame: stop saving.
            current_module = None
            current_rules = None

        # While inside a target module, save each rule variable as soon as
        # it appears in the frame locals (falling back to a same-named
        # module attribute). NOTE: this re-saves on every traced line, so
        # each file ends up holding the variable's last value.
        if current_rules:
            base_path: Path = module.save_path
            for var_name, save_path in current_rules.items():
                if var_name in frame.f_locals:
                    torch.save(
                        (frame.f_locals[var_name]).to(dtype)
                        if torch.is_tensor(frame.f_locals[var_name])
                        else frame.f_locals[var_name],
                        base_path / save_path,
                    )
                elif getattr(module, var_name, None) is not None:
                    torch.save(
                        getattr(module, var_name).to(dtype)
                        if torch.is_tensor(getattr(module, var_name))
                        else getattr(module, var_name),
                        base_path / save_path,
                    )
                else:
                    # Deliberately best-effort: warn instead of raising so a
                    # single missing variable doesn't abort the whole dump.
                    print(
                        f"Cant find tensor {var_name} in {module.name}.{frame.f_code.co_name}"
                    )

        return trace_locals

    # Install the trace hook, run the model once, then detach the hook.
    sys.settrace(trace_locals)
    output = model(**input_data)
    sys.settrace(None)
    return output


@time_counter
@torch.inference_mode()
def save_locals_during_forward_fake(
    model: torch.nn.Module,
    dtype: torch.dtype,
    input_data: Dict[str, torch.Tensor],
    save_rules: Dict[Type[torch.nn.Module], Dict[str, str]],
):
    """
    Dry-run twin of ``save_locals_during_forward``: installs the same
    sys.settrace hook and performs the same module matching, but never
    writes anything to disk. Useful (together with ``@time_counter``) to
    measure pure tracing overhead against the real saver.

    Args:
        model: the PyTorch model to run.
        dtype: accepted for signature parity with the real saver; unused
            here since nothing is saved.
        input_data: keyword inputs for ``model(**input_data)``.
        save_rules: same rule mapping as the real saver; only used for the
            isinstance matching.
    """
    # Same bookkeeping as the real saver: current target module + rules.
    current_module = None
    current_rules = None

    @torch.inference_mode()
    def trace_locals(frame, event, arg):
        nonlocal current_module, current_rules

        # Only react to 'line' events inside forward/_integer frames.
        if event != "line" or (
            "_integer" not in frame.f_code.co_name
            and "forward" not in frame.f_code.co_name
        ):
            return trace_locals

        # Track entry/exit of target submodules exactly like the real saver.
        module = frame.f_locals.get("self")
        if (
            isinstance(module, tuple(save_rules.keys()))
            and hasattr(module, "save_path")
            and module.save_path is not None
        ):
            current_module = module
            current_rules = save_rules[type(module)]
        elif module != current_module:
            current_module = None
            current_rules = None

        # Fix: the original looped over the rules with an unconditional
        # `continue` as the first statement, leaving ~20 lines of
        # unreachable torch.save code. The dead code is removed — this
        # variant intentionally performs no I/O.

        return trace_locals

    sys.settrace(trace_locals)
    output = model(**input_data)
    sys.settrace(None)
    return output


@time_counter
@torch.inference_mode()
def save_locals_during_forward_async(
    model: torch.nn.Module,
    dtype: torch.dtype,
    input_data: Dict[str, torch.Tensor],
    save_rules: Dict[Type[torch.nn.Module], Dict[str, str]],
    max_workers=64,
):
    """
    Run the model once and save selected submodule locals, offloading the
    GPU->CPU copy to a side CUDA stream and the disk write to a thread pool.

    Same tracing scheme as ``save_locals_during_forward``; see there for the
    ``save_rules`` format. Differences: saves are submitted to a
    ``ThreadPoolExecutor`` instead of blocking the forward pass, and only
    CUDA tensors are cast to ``dtype`` (CPU tensors are saved as-is —
    NOTE(review): this differs from the synchronous saver, which casts every
    tensor; confirm whether that asymmetry is intended).

    Args:
        model: the PyTorch model to run.
        dtype: dtype used when copying CUDA tensors to CPU for saving.
        input_data: keyword inputs for ``model(**input_data)``.
        save_rules: module class -> {variable name: filename} mapping.
        max_workers: size of the I/O thread pool.
    """
    current_module = None
    current_rules = None
    executor = ThreadPoolExecutor(max_workers)

    def async_save(data, path):
        """Worker: copy ``data`` to CPU on a side stream (if CUDA), then save."""
        # NOTE(review): ``data`` is captured by reference; if the model
        # mutates the tensor in place before this worker copies it, the
        # saved value may differ from the traced one — TODO confirm.
        stream = None
        if torch.is_tensor(data) and data.is_cuda:
            # Use a dedicated high-priority stream so the device-to-host
            # copy overlaps with compute on the default stream.
            stream = torch.cuda.Stream(priority=1)
            with torch.cuda.stream(stream):
                data = data.to(dtype=dtype, device="cpu", non_blocking=True)
        if stream is not None:
            stream.synchronize()  # make sure the async copy has finished
        torch.save(data, path)
        print(f"saved {path}")

    # Global trace callback (same matching logic as the synchronous saver).
    @torch.inference_mode()
    def trace_locals(frame, event, arg):
        nonlocal current_module, current_rules

        # Only react to 'line' events inside forward/_integer frames.
        if event != "line" or (
            "_integer" not in frame.f_code.co_name
            and "forward" not in frame.f_code.co_name
        ):
            return trace_locals

        # Track entry/exit of target submodules with a set save_path.
        module = frame.f_locals.get("self")
        if (
            isinstance(module, tuple(save_rules.keys()))
            and hasattr(module, "save_path")
            and module.save_path is not None
        ):
            current_module = module
            current_rules = save_rules[type(module)]
        elif module != current_module:
            current_module = None
            current_rules = None

        if current_rules:
            base_path: Path = module.save_path
            for var_name, save_path in current_rules.items():
                data = None
                if var_name in frame.f_locals:
                    data = frame.f_locals[var_name]
                elif getattr(module, var_name, None) is not None:
                    data = getattr(module, var_name)

                if data is not None:
                    # Hand the actual write off to the worker pool.
                    executor.submit(async_save, data, base_path / save_path)
                else:
                    pass

        return trace_locals

    sys.settrace(trace_locals)
    try:
        output = model(**input_data)
    finally:
        # Fix: always detach the trace hook and drain the pool, even when
        # the forward pass raises. The original left the (very slow) global
        # trace installed and leaked pending writes on error.
        sys.settrace(None)
        executor.shutdown(wait=True)
    return output


@torch.inference_mode()
def save_int_model(
    model: torch.nn.Module,
    input: dict[str, torch.Tensor],
    base_path: Path = Path("./hardware"),
):
    """Prepare a quantized model for hardware dumping and run one traced pass.

    Only rank 0 writes; all ranks synchronize on a barrier at the end.
    Per-module dump directories are created under ``base_path / module.name``.
    """
    if torch.distributed.get_rank() == 0:
        base_path.mkdir(exist_ok=True, parents=True)

        # Pass 1: give every quant module/quantizer its own dump directory
        # and flip on tensor saving.
        for _, mod in model.named_modules():
            if isinstance(mod, (QuantModule, Quantizer)):
                mod.save_path = base_path / mod.name
                mod.save_path.mkdir(exist_ok=True, parents=True)
                mod.save_tensors = True

        # Pass 2: swap LSQ+ quantizers onto their save-aware forward.
        for _, mod in model.named_modules():
            if isinstance(mod, (LSQPlusQuantizer,)):
                mod.forward = mod.forward_for_save

        # Pass 3: switch every quant module to integer-only quantized inference.
        for mod in model.modules():
            if isinstance(mod, QuantModule):
                mod.set_quant_state(weight_quant=True, act_quant=True, integer_only=True)

        save_locals_during_forward(model, torch.float16, input, get_save_rules())
    torch.distributed.barrier()
