import functools
from pathlib import Path
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pad_sequence
from torch.distributed import broadcast_object_list, get_rank, barrier, broadcast

from tqdm.rich import tqdm
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaRMSNorm
from ..models.modeling_vit import Block


@torch.no_grad()
def get_act_scale(model, dataset, num_samples=128, batch_size=32):
    """Collect per-channel max-|activation| scales for every ``nn.Linear``.

    Runs ``num_samples`` batches of ``batch_size`` items from ``dataset``
    through ``model`` with a forward hook on each linear layer, recording the
    running per-input-channel maximum of the absolute activations.  Results
    are cached on disk keyed by model class name, sample count and batch size.

    Returns a dict mapping module name -> 1-D bf16 CUDA tensor of scales.
    """
    # Use the class *name*, not repr(model.__class__): the repr contains
    # characters like "<", "'" and spaces that do not belong in a file name.
    cache_name = f"{model.__class__.__name__}_{num_samples}_{batch_size}_smooth.pt"
    cache_dir = Path(__file__).parent / "cache"
    cache_dir.mkdir(exist_ok=True, parents=True)
    cache_path = cache_dir / cache_name
    if cache_path.exists():
        # The cache holds only plain tensors, so the safe weights-only
        # loader suffices (avoids arbitrary pickle execution).
        act_scales = torch.load(cache_path, map_location="cuda", weights_only=True)
        return {k: v.cuda().bfloat16() for k, v in act_scales.items()}

    act_scales = {}

    def stat_tensor(name, tensor):
        # Fold this tensor into the running per-channel max for `name`.
        hidden_dim = tensor.shape[-1]
        tensor = tensor.view(-1, hidden_dim).abs().detach()
        coming_max = torch.max(tensor, dim=0)[0].cpu()
        if name in act_scales:
            act_scales[name] = torch.max(act_scales[name], coming_max)
        else:
            act_scales[name] = coming_max

    def stat_input_hook(m, x, y, name):
        # Forward hook: stat the module *input* (first positional arg).
        if isinstance(x, tuple):
            x = x[0]
        stat_tensor(name, x)

    def pre_inp_idx(x):
        # Normalize input_ids to a (batch_size, <=2048) long tensor.
        # 32000 is used as the pad token id — TODO confirm against tokenizer.
        if isinstance(x, list):
            if torch.is_tensor(x[0]):
                x = pad_sequence(x, batch_first=True, padding_value=32000).to(
                    dtype=torch.long, device=model.device
                )
            else:
                x = torch.tensor(x, dtype=torch.long, device=model.device)
        elif torch.is_tensor(x):
            x = torch.as_tensor(x, dtype=torch.long, device=model.device)
        else:
            raise ValueError(f"{type(x)} {type(x[0])}")
        return x.view(batch_size, -1)[:, :2048]

    def pre_pix(x):
        # Normalize pixel_values to a (batch_size, 6, 224, 224) bf16 tensor.
        if isinstance(x, list):
            if torch.is_tensor(x[0]):
                x = torch.stack(x).to(dtype=torch.bfloat16, device=model.device)
            else:
                x = torch.tensor(x, dtype=torch.long, device=model.device)
        elif torch.is_tensor(x):
            x = torch.as_tensor(x, dtype=torch.bfloat16, device=model.device)
        else:
            raise ValueError(f"{type(x)} {type(x[0])}")
        return x.view(batch_size, 6, 224, 224)

    pre_process_map = {
        "pixel_values": pre_pix,
        "input_ids": pre_inp_idx,
    }

    hooks = []
    for name, m in model.named_modules():
        if isinstance(m, nn.Linear):
            hooks.append(
                m.register_forward_hook(functools.partial(stat_input_hook, name=name))
            )

    try:
        # NOTE: `num_samples` counts *batches* here, so num_samples * batch_size
        # dataset items are consumed in total.
        for i in tqdm(
            range(num_samples), total=num_samples, desc="Smooth model", unit="batch"
        ):
            batch = dataset[i * batch_size : (i + 1) * batch_size]
            # Keys without a registered pre-processor map to None.
            inps = {
                k: pre_process_map.get(k, lambda x: None)(v) for k, v in batch.items()
            }
            inps["attention_mask"] = inps["input_ids"].ne(32000)
            model(**inps)
    finally:
        # Always detach the hooks, even if a forward pass raises.
        for h in hooks:
            h.remove()

    torch.save(act_scales, cache_path)
    return {k: v.cuda().bfloat16() for k, v in act_scales.items()}


@torch.no_grad()
def apply_smooth_linear(
    model: nn.Module,
    dataset,
    alpha=0.5,
    num_samples=256,
    batch_size=32,
    cal_batch: dict[str, torch.Tensor] | None = None,
) -> nn.Module:
    """Apply SmoothQuant-style smoothing to every LLaMA decoder / ViT block.

    Rank 0 calibrates activation scales on ``dataset`` and broadcasts them to
    all other ranks; every rank then folds the balanced scales into its
    norm/linear pairs in place.  When ``cal_batch`` is given, the loss on that
    batch is compared before and after smoothing as a sanity check.

    Returns the (mutated) model.
    """
    model.eval()

    # Calibration runs only on rank 0; the tensor *shapes* travel first so
    # the other ranks can allocate receive buffers for the broadcasts.
    if get_rank() == 0:
        act_scales = get_act_scale(
            model, dataset, num_samples=num_samples, batch_size=batch_size
        )
        act_b = [{k: v.shape for k, v in act_scales.items()}]
    else:
        act_b = [None]

    # barrier() expects a *list* of device ids, not a bare int.
    barrier(device_ids=[get_rank()])
    broadcast_object_list(act_b, src=0)
    if get_rank() != 0:
        act_scales = {
            k: torch.empty(*v, dtype=torch.bfloat16, device="cuda")
            for k, v in act_b[0].items()
        }
    # Dict insertion order matches across ranks (it came from the same
    # broadcast object), so per-tensor broadcasts line up.
    for v in act_scales.values():
        broadcast(v, src=0)

    # Reference loss before smoothing (skipped when no calibration batch).
    real_out = model(**cal_batch).loss if cal_batch is not None else None

    for name, module in model.named_modules():
        if isinstance(module, LlamaDecoderLayer):
            # Fold scales into input_layernorm -> q/k/v projections.
            attn_ln = module.input_layernorm
            qkv = [
                module.self_attn.q_proj,
                module.self_attn.k_proj,
                module.self_attn.v_proj,
            ]
            qkv_input_scales = act_scales[name + ".self_attn.q_proj"]
            smooth_ln_fcs_llama_like(attn_ln, qkv, qkv_input_scales, alpha)

            # Fold scales into post_attention_layernorm -> gate/up projections.
            ffn_ln = module.post_attention_layernorm
            fcs = [module.mlp.gate_proj, module.mlp.up_proj]
            fcs_input_scales = act_scales[name + ".mlp.gate_proj"]
            smooth_ln_fcs_llama_like(ffn_ln, fcs, fcs_input_scales, alpha)

        if isinstance(module, Block):
            # ViT block: norm1 -> fused qkv projection.
            qkv_input_scales = act_scales[name + ".attn.qkv"]
            smooth_ln_fcs_vit_like(module.norm1, [module.attn.qkv], qkv_input_scales, alpha)

            # norm2 -> first MLP linear.
            fcs_input_scales = act_scales[name + ".mlp.fc1"]
            smooth_ln_fcs_vit_like(module.norm2, [module.mlp.fc1], fcs_input_scales, alpha)

    if cal_batch is not None:
        with torch.inference_mode():
            smooth_out = model(**cal_batch).loss
            # Smoothing is mathematically an identity; the loss must match.
            torch.testing.assert_close(real_out, smooth_out)
    return model


@torch.no_grad()
def smooth_ln_fcs_llama_like(ln, fcs, act_scales, alpha=0.5):
    """Fold SmoothQuant scales into an RMSNorm and its consumer linears.

    Divides ``ln.weight`` by per-channel scales and multiplies the input
    columns of every following linear by the same scales, so the composed
    function is numerically (near-)unchanged while activation outliers are
    flattened.  ``alpha`` balances activation vs. weight magnitudes.
    """
    fc_list = fcs if isinstance(fcs, list) else [fcs]
    assert isinstance(ln, (LlamaRMSNorm,)), type(ln)
    n_ch = act_scales.numel()
    for fc in fc_list:
        assert isinstance(fc, nn.Linear)
        assert ln.weight.numel() == fc.in_features == n_ch

    ref_weight = fc_list[0].weight
    device, dtype = ref_weight.device, ref_weight.dtype
    act_scales = act_scales.to(device=device, dtype=dtype).to(torch.float32)

    # Per-input-channel max |w| taken over all consumer linears.
    col_maxes = [fc.weight.abs().max(dim=0, keepdim=True)[0] for fc in fc_list]
    weight_scales = (
        torch.cat(col_maxes, dim=0).max(dim=0)[0].clamp(min=1e-5).to(torch.float32)
    )

    scales = act_scales.pow(alpha).div(weight_scales.pow(1 - alpha))
    scales = scales.clamp(min=1e-5).to(device)

    ln.weight.data = ln.weight.data.to(torch.float32).div(scales).to(dtype)
    for fc in fc_list:
        fc.weight.data = (
            fc.weight.data.to(torch.float32).mul(scales.view(1, -1)).to(dtype)
        )


@torch.no_grad()
def smooth_ln_fcs_vit_like(ln, fcs, act_scales, alpha=0.5):
    """Fold SmoothQuant scales into a LayerNorm and its consumer linears.

    ``ln.weight`` and ``ln.bias`` are divided by per-channel scales while each
    linear's input columns are multiplied by them, leaving the composition
    numerically (near-)unchanged but with flattened activation outliers.
    ``alpha`` balances activation vs. weight magnitudes.
    """
    fc_list = fcs if isinstance(fcs, list) else [fcs]
    assert isinstance(ln, nn.LayerNorm)
    n_ch = act_scales.numel()
    for fc in fc_list:
        assert isinstance(fc, nn.Linear)
        assert ln.weight.numel() == fc.in_features == n_ch

    ref_weight = fc_list[0].weight
    device, dtype = ref_weight.device, ref_weight.dtype
    act_scales = act_scales.to(device=device, dtype=dtype)

    # Per-input-channel max |w| taken over all consumer linears.
    col_maxes = [fc.weight.abs().max(dim=0, keepdim=True)[0] for fc in fc_list]
    weight_scales = (
        torch.cat(col_maxes, dim=0).max(dim=0)[0].clamp(min=1e-5).to(torch.float32)
    )

    scales = act_scales.pow(alpha).div(weight_scales.pow(1 - alpha))
    scales = scales.clamp(min=1e-5).to(device)

    ln.weight.data = ln.weight.data.to(torch.float32).div(scales).to(dtype)
    ln.bias.data = ln.bias.data.to(torch.float32).div(scales).to(dtype)
    for fc in fc_list:
        fc.weight.data = (
            fc.weight.data.to(torch.float32).mul(scales.view(1, -1)).to(dtype)
        )


@torch.no_grad()
def smooth_single_linear(fc: nn.Linear, act_scales, alpha=0.5):
    """Fold SmoothQuant scales into a single linear layer.

    Unlike the norm/linear variants, there is no preceding norm to absorb the
    inverse scale, so the balanced scales are stored on the module as a
    non-persistent ``act_scale`` buffer — presumably the runtime path divides
    the incoming activations by it (verify against the caller).
    """
    device, dtype = fc.weight.device, fc.weight.dtype
    act_scales = act_scales.to(device=device, dtype=dtype)
    # Per-input-channel max |w| of this layer only.
    weight_scales = fc.weight.abs().max(dim=0)[0].clamp(min=1e-5)

    scales = (
        (act_scales.pow(alpha) / weight_scales.pow(1 - alpha))
        .clamp(min=1e-5)
        .to(device)
        .to(dtype)
    )

    fc.weight.mul_(scales.view(1, -1))
    fc.register_buffer("act_scale", scales, persistent=False)
