import logging
from pathlib import Path
import re
from typing import Optional
import torch
from types import MethodType

from quantize.int_linear import QuantLinear
from utils.overwatch import initialize_overwatch
from ..blocks import quant_llama_model, quant_proj, quant_vit, QuantModule, QuantBlock
from ..config import QuantizationConfigArgs, QuantArguments
from ..quantizer import LSQPlusQuantizer, Quantizer
from .params import disable_params, print_model_param_size, check_trainable_parameters
from .tools import cleanup_memory

logger = initialize_overwatch("vla_qat")


@torch.no_grad
def set_module_names(model: torch.nn.Module):
    for name, m in model.named_modules():
        m.name = name


def find_latest_pytorch_model_with_max(runs_dir: str) -> Optional[str]:
    """Find the most recently created ``pytorch_model.bin`` under ``runs_dir``.

    Scans every experiment directory under ``runs_dir`` for valid
    ``checkpoint-<step>`` subdirectories that contain ``pytorch_model.bin``,
    tracking the newest match with running-max bookkeeping instead of sorting.
    Within a single experiment directory (identical creation time) ties are
    broken deterministically by the highest checkpoint step.

    Args:
        runs_dir: path to the runs directory (e.g. ``'runs'``).

    Returns:
        Path string of the newest model file, or ``None`` if none exists.
    """
    root = Path(runs_dir)
    if not root.is_dir():
        return None

    checkpoint_pattern = re.compile(r"checkpoint-(\d+)$")
    latest_model_path: Optional[Path] = None
    # (experiment ctime, checkpoint step) — lexicographic max picks the winner.
    latest_key = (-1.0, -1)

    for exp_dir in root.iterdir():
        if not exp_dir.is_dir():
            continue

        # Hoisted: ctime is a property of the experiment dir, not the checkpoint.
        exp_ctime = exp_dir.stat().st_ctime

        for checkpoint_dir in exp_dir.glob("checkpoint-*"):
            match = checkpoint_pattern.fullmatch(checkpoint_dir.name)
            if not (match and checkpoint_dir.is_dir()):
                continue
            model_path = checkpoint_dir / "pytorch_model.bin"
            if not model_path.exists():
                continue
            key = (exp_ctime, int(match.group(1)))
            if key > latest_key:
                latest_key = key
                latest_model_path = model_path

    return str(latest_model_path) if latest_model_path else None


@torch.no_grad
def load_pretrained(
    model: torch.nn.Module,
    state_path: str | list[str],
    use_compiled=False,
    ignore_filter: None | str = None,
    device="cuda",
    dtype=torch.bfloat16,
):
    # a = model.vision_backbone.fused_featurizer.blocks[15].mlp.act.gelu_alpha.data.clone()
    # logger.info(f"a before load:{a}")

    if isinstance(state_path, str):
        state_dict: dict = torch.load(
            state_path, map_location="cpu", weights_only=False
        )
    elif isinstance(state_path, list):
        state_dict: dict = {}
        for s in state_path:
            state_dict.update(torch.load(s, map_location="cpu", weights_only=False))
    if ignore_filter is not None:
        state_dict = {
            k: v
            for k, v in state_dict.items()
            if not any((n and n in k) for n in ignore_filter.split(";"))
        }
    if not use_compiled:
        state_dict: dict = {
            k.replace("._orig_mod", ""): v for k, v in state_dict.items()
        }
    try:
        # logger.info("\n".join(str(i) for i in state_dict.keys() if "language_model" not in str(i)))
        model.load_state_dict(state_dict, strict=False)
        # for k in state_dict.keys():
        #     if "gelu" in k:
        #         logger.info(f"Loading {k}")

    except Exception as e:
        logger.error(str(list(set(state_dict.keys()) - set(model.state_dict().keys()))))
        # raise
        logger.error(str(e)[:200])
        # import traceback; traceback.print_exc();
        exit(0)

    # assert not torch.allclose(
    #     a,
    #     state_dict[
    #         "vision_backbone.fused_featurizer.blocks.15._orig_mod.mlp.act.gelu_alpha"
    #     ].to(a),
    # ), (
    #     f"{torch.nn.functional.mse_loss(a, state_dict['vision_backbone.fused_featurizer.blocks.15._orig_mod.mlp.act.gelu_alpha'].to(a)):.6f}"
    # )
    # assert not torch.allclose(
    #     a, model.language_model.model.layers[31].mlp.down_proj.weight.to(a)
    # ), (
    #     f"{torch.nn.functional.mse_loss(a, model.language_model.model.layers[31].mlp.down_proj.weight):.6f}"
    # )
    model = model.to(device=device, dtype=dtype)

    return model


@torch.no_grad()
def quant_vla_model(model, args, qvit1=True, qvit2=True, qproj=True, qllama=True):
    """Selectively quantize the sub-networks of a VLA model.

    Each ``q*`` flag chooses between wrapping that sub-network with quantized
    modules and freezing its parameters outright via ``disable_params``.
    The model is mutated in place and returned.
    """
    backbone = model.vision_backbone

    if qvit1:
        backbone.featurizer = quant_vit(backbone.featurizer, args, use_diff_loss=False)
    else:
        disable_params(backbone.featurizer, all_disable=True)

    if qvit2:
        backbone.fused_featurizer = quant_vit(
            backbone.fused_featurizer, args, use_diff_loss=False
        )
    else:
        disable_params(backbone.fused_featurizer, all_disable=True)

    if qproj:
        model.projector = quant_proj(model.projector, args, use_diff_loss=False)
    else:
        disable_params(model.projector, all_disable=True)

    if qllama:
        model.language_model.model = quant_llama_model(model.language_model.model, args)
    else:
        # Note: disables the whole language_model, not only its inner .model.
        disable_params(model.language_model, all_disable=True)

    return model


def compile_layers(model: torch.nn.Module) -> torch.nn.Module:
    for i in range(len(model.vision_backbone.featurizer.blocks)):
        model.vision_backbone.featurizer.blocks[i].compile()
        # model.vision_backbone.featurizer.blocks[i].norm1.compile()
        # model.vision_backbone.featurizer.blocks[i].norm2.compile()
        # model.vision_backbone.featurizer.blocks[i].attn.compile()
        # model.vision_backbone.featurizer.blocks[i].compile()
        # model.vision_backbone.featurizer.blocks[i] = torch.compile(
        #     model.vision_backbone.featurizer.blocks[i],
        #     # fullgraph=True,
        #     # mode="max-autotune",
        #     # backend="cudagraphs",
        # )

    for i in range(len(model.vision_backbone.fused_featurizer.blocks)):
        model.vision_backbone.fused_featurizer.blocks[i].compile()
        # model.vision_backbone.fused_featurizer.blocks[i].norm1.compile()
        # model.vision_backbone.fused_featurizer.blocks[i].norm2.compile()
        # model.vision_backbone.fused_featurizer.blocks[i].attn.compile()
        # model.vision_backbone.fused_featurizer.blocks[i] = torch.compile(
        #     model.vision_backbone.fused_featurizer.blocks[i],
        #     # fullgraph=True,
        #     # mode="max-autotune",
        #     # backend="cudagraphs",
        # )

    # model.vision_backbone.featurizer = torch.compile(model.vision_backbone.featurizer)
    # model.vision_backbone.fused_featurizer = torch.compile(
    #     model.vision_backbone.fused_featurizer
    # )

    model.projector.compile()
    # model.projector = torch.compile(
    #     model.projector,
    #     fullgraph=True,
    #     # mode="max-autotune",
    #     # backend="cudagraphs",
    # )

    for i in range(len(model.language_model.model.layers)):
        pass
        # model.language_model.model.layers[i].self_attn = torch.compile(
        #     model.language_model.model.layers[i].self_attn,
        #     # fullgraph=True,
        #     # mode="max-autotune",
        #     # mode="reduce-overhead",
        #     # backend="cudagraphs",
        # )
        # model.language_model.model.layers[i].mlp = torch.compile(
        #     model.language_model.model.layers[i].mlp,
        #     # fullgraph=True,
        #     # mode="max-autotune",
        #     # mode="reduce-overhead",
        #     # backend="cudagraphs",
        # )
        model.language_model.model.layers[i].compile()
    model.language_model.model.norm.compile()
    # model.language_model.model._m[1] = torch.compile(model.language_model.model._m[1])

    # cleanup_memory()
    return model


@torch.no_grad
def patch_and_quant_model_for_train(
    model: torch.nn.Module,
    args: QuantArguments,
    layer_compile=True,
    quantizer_compile=False,
    linear_only=False,
    qvit1=True,
    qvit2=True,
    qproj=True,
    qllama=True,
    qact=True,
    qweight=True,
    cal_dataset=None,
    len_cal_dataset=0,
    stat_quantizer=False,
    cache_subfix="",
) -> torch.nn.Module:
    """Quantize a VLA model and prepare it for quantization-aware training.

    Pipeline: disable the KV cache, wrap the selected sub-networks with quant
    modules, freeze non-quant parameters, configure per-module quantization
    state, run LSQ+ calibration, and attach parameter-inspection helpers.

    Args:
        model: the VLA model to patch (moved to CUDA / bfloat16, set to eval).
        args: quantization hyper-parameters (wrapped in QuantizationConfigArgs).
        layer_compile: currently unused — the compile_layers() call is disabled.
        quantizer_compile: propagate torch.compile usage into each Quantizer.
        linear_only: quantize only linear layers and swap the ``forward_linear``
            variants onto the attention/decoder classes.
        qvit1, qvit2, qproj, qllama: which sub-networks to quantize.
        qact, qweight: enable activation / weight quantization.
        cal_dataset, len_cal_dataset: calibration data for LSQ+ initialization.
        stat_quantizer: if True, dump quantizer statistics and exit.
        cache_subfix: extra suffix appended to the calibration cache key.

    Returns:
        The patched model (mutated in place and returned).
    """
    model.config.use_cache = False  # KV caching is incompatible with training
    config_args = QuantizationConfigArgs(args)
    model = model.bfloat16().cuda().eval()

    print_model_param_size(model)

    model = quant_vla_model(
        model, config_args, qvit1=qvit1, qvit2=qvit2, qproj=qproj, qllama=qllama
    )
    disable_params(model)

    # Cache-key suffix encoding which sub-networks / quant modes are active,
    # so calibration caches from different configurations never collide.
    subfix = ""
    if qllama:
        subfix += "_llama"
    if qvit1:
        subfix += "_vit1"
    if qvit2:
        subfix += "_vit2"
    if qproj:
        subfix += "_qproj"
    if linear_only:
        subfix += "_linear"

    subfix += "_w" if qweight else "_now"
    subfix += "_a" if qact else "_noa"
    subfix += cache_subfix

    for m in model.modules():
        if isinstance(m, QuantModule):
            m.set_quant_state(
                weight_quant=qweight, act_quant=qact, linear_only=linear_only
            )
        if isinstance(m, Quantizer):
            m.compile_quantizer = quantizer_compile

    if linear_only:
        from ..blocks import QuantLlamaAttn, QuantVitAttn, QuantLlamaDecoderLayer

        # NOTE: this rebinds forward on the classes themselves, affecting every
        # instance process-wide, not only this model.
        QuantLlamaDecoderLayer.forward = QuantLlamaDecoderLayer.forward_linear
        QuantLlamaAttn.forward = QuantLlamaAttn.forward_linear
        QuantVitAttn.forward = QuantVitAttn.forward_linear

    model = model.cuda().bfloat16().eval()
    set_module_names(model)

    LSQPlusQuantizer.qat_calibration_quantizer(
        model, cal_datasets=cal_dataset, len_cal_dataset=len_cal_dataset, subfix=subfix
    )
    if stat_quantizer:
        LSQPlusQuantizer.stat_quantizer(model, next(cal_dataset[0]))
        # Bug fix: barrier's device_ids expects a list of ints, not a bare int.
        torch.distributed.barrier(device_ids=[torch.distributed.get_rank()])
        exit(0)

    # Expose the parameter-inspection helpers as bound methods on the model.
    model.check_trainable_parameters = MethodType(check_trainable_parameters, model)
    model.print_model_param_size = MethodType(print_model_param_size, model)
    cleanup_memory()
    # Re-stamp names: quantization/calibration may have replaced submodules.
    set_module_names(model)

    return model


def patch_trainable_gelu(model, trainable=False, grad=True):
    """Optionally attach a learnable GELU alpha to ``model`` and share it
    with the package-level ``gelu`` module.

    When ``trainable`` is True, creates ``model.gelu_alpha`` (if absent) as a
    CUDA float32 parameter initialised to 1.743, with ``requires_grad=grad``,
    and points ``gelu.gelu_alpha`` at it. Always logs the current alpha.
    """
    from .. import gelu

    if trainable:
        if not hasattr(model, "gelu_alpha"):
            initial = torch.tensor([1.743], dtype=torch.float32, device="cuda")
            model.gelu_alpha = torch.nn.Parameter(initial, requires_grad=grad)
        # Share the model-owned parameter with the functional gelu implementation.
        gelu.gelu_alpha = model.gelu_alpha
    logger.info(f"gelu alpha set as {gelu.gelu_alpha}")
