from utils.overwatch import initialize_overwatch
from quantize.utils.tools import cleanup_memory
import torch
import torch.nn as nn
from pathlib import Path
from transformers.models.llama.modeling_llama import LlamaForCausalLM

from .fuse_ln import fuse_layer_norms
from .hadamard_utils import (
    apply_exact_had_to_linear,
)

logger = initialize_overwatch("vla_qat")


def rotate_proj(model, R1: torch.Tensor) -> None:
    """Left-multiply the projector's final linear weight by R1^T, rotating its output.

    Math is done in float64 on CUDA, then cast back to the original dtype on CPU.
    """
    linear = model.projector.fc3
    orig_dtype = linear.weight.data.dtype
    w64 = linear.weight.data.to(device="cuda", dtype=torch.float64)
    linear.weight.data = (R1.T @ w64).to(device="cpu", dtype=orig_dtype)


def rotate_embeddings(model, R1: torch.Tensor) -> None:
    """Right-multiply the token-embedding weight by R1, rotating the hidden basis.

    Computation runs in float64 on CUDA; the result is cast back to the
    original dtype and stored on CPU.
    """
    emb = model.model.embed_tokens
    orig_dtype = emb.weight.data.dtype
    w64 = emb.weight.data.to(device="cuda", dtype=torch.float64)
    emb.weight.data = (w64 @ R1).to(device="cpu", dtype=orig_dtype)


def rotate_attention_inputs(layer, R1) -> None:
    """Right-multiply W_Q, W_K and W_V by R1 so they accept rotated hidden states."""
    attn = layer.self_attn
    for proj in (attn.q_proj, attn.k_proj, attn.v_proj):
        orig_dtype = proj.weight.dtype
        # float64 on CUDA for numerical accuracy, then back to CPU/original dtype.
        w64 = proj.weight.to(device="cuda", dtype=torch.float64)
        proj.weight.data = (w64 @ R1).to(device="cpu", dtype=orig_dtype)

def rotate_attention_qkv_inputs(layer, R1) -> None:
    """Right-multiply a fused QKV projection weight by R1 (rotated-input variant)."""
    proj = layer.self_attn.qkv_proj
    orig_dtype = proj.weight.dtype
    # float64 on CUDA for numerical accuracy, then back to CPU/original dtype.
    w64 = proj.weight.to(device="cuda", dtype=torch.float64)
    proj.weight.data = (w64 @ R1).to(device="cpu", dtype=orig_dtype)


def rotate_attention_output(layer, R1) -> None:
    """Left-multiply o_proj's weight (and bias, if present) by R1^T.

    This rotates the attention block's output back into the rotated residual
    stream. Arithmetic is float64 on CUDA; results return to CPU in the
    original dtype.
    """
    proj = layer.self_attn.o_proj
    orig_dtype = proj.weight.data.dtype

    w64 = proj.weight.data.to(device="cuda", dtype=torch.float64)
    proj.weight.data = (R1.T @ w64).to(device="cpu", dtype=orig_dtype)
    if proj.bias is not None:
        b64 = proj.bias.data.to(device="cuda", dtype=torch.float64)
        proj.bias.data = (R1.T @ b64).to(device="cpu", dtype=orig_dtype)


def rotate_mlp_input(layer, R1):
    """Right-multiply up_proj and gate_proj by R1 so the MLP accepts rotated inputs."""
    for proj in (layer.mlp.up_proj, layer.mlp.gate_proj):
        orig_dtype = proj.weight.dtype
        # float64 on CUDA for numerical accuracy, then back to CPU/original dtype.
        w64 = proj.weight.data.to(device="cuda", dtype=torch.float64)
        proj.weight.data = (w64 @ R1).to(device="cpu", dtype=orig_dtype)


def rotate_mlp_output(layer, R1):
    """Left-multiply down_proj's weight (and bias, if present) by R1^T.

    Rotates the MLP's output into the rotated residual stream. Math is done
    in float64 on CUDA and the result is stored on CPU in the original dtype.
    """
    proj = layer.mlp.down_proj
    orig_dtype = proj.weight.data.dtype
    w64 = proj.weight.data.to(device="cuda", dtype=torch.float64)
    proj.weight.data = (R1.T @ w64).to(device="cpu", dtype=orig_dtype)
    if proj.bias is not None:
        b64 = proj.bias.data.to(device="cuda", dtype=torch.float64)
        proj.bias.data = (R1.T @ b64).to(device="cpu", dtype=orig_dtype)


def rotate_head(model, R1: torch.Tensor) -> None:
    """Right-multiply the LM head's weight by R1 so it accepts rotated hidden states."""
    head = model.lm_head
    orig_dtype = head.weight.data.dtype
    # float64 on CUDA for numerical accuracy, then back to CPU/original dtype.
    w64 = head.weight.data.to(device="cuda", dtype=torch.float64)
    head.weight.data = (w64 @ R1).to(device="cpu", dtype=orig_dtype)


def rotate_ov_proj(layer, head_num, head_dim, R2=None):
    """Apply the per-head R2 / exact-Hadamard rotation to the V-O projection pair.

    v_proj is rotated on its output dimension and o_proj on its input
    dimension, so the composed v->o transform is preserved.

    NOTE(review): ``head_num`` is accepted but unused here; kept for caller
    compatibility — confirm whether it can be dropped project-wide.
    """
    attn = layer.self_attn
    apply_exact_had_to_linear(attn.v_proj, had_dim=head_dim, output=True, R2=R2)
    apply_exact_had_to_linear(attn.o_proj, had_dim=head_dim, output=False, R2=R2)


@torch.no_grad()
def rotator_model(model: LlamaForCausalLM, r_path: Path) -> nn.Module:
    """Rotate ``model`` in place using rotation matrices loaded from ``r_path``.

    Loads R1 (global) and per-layer R2 matrices from a checkpoint, fuses the
    layer norms so the rotation commutes with normalization, then rotates the
    embeddings, LM head, and every transformer layer.

    Args:
        model: The causal LM to rotate (modified in place).
        r_path: Path to a ``torch.load``-able dict containing ``"R1"`` and
            ``"model.layers.{i}.self_attn.R2"`` entries.

    Returns:
        The rotated model (same object as ``model`` after layer-norm fusion).
    """
    # SECURITY: weights_only=False deserializes arbitrary pickled objects —
    # only load rotation checkpoints from trusted sources.
    raw = torch.load(r_path, map_location="cpu", weights_only=False)
    # Normalize checkpoint key prefixes left over from torch.compile / wrappers.
    r_dict = {
        k.replace("._orig_mod", "").replace("language_model.", ""): v
        for k, v in raw.items()
    }
    # Fold LayerNorm scales into adjacent linears so the rotation is exact.
    model = fuse_layer_norms(model, False)

    R1 = r_dict["R1"].cuda().to(torch.float64)
    config = model.config
    num_heads = config.num_attention_heads
    head_dim = config.hidden_size // num_heads

    rotate_embeddings(model, R1)
    rotate_head(model, R1)
    cleanup_memory()

    layers = model.model.layers
    for idx, layer in enumerate(logger.tqdm(layers, unit="layer", desc="Rotating")):
        R2 = r_dict[f"model.layers.{idx}.self_attn.R2"].cuda().to(torch.float64)
        rotate_attention_inputs(layer, R1)
        rotate_attention_output(layer, R1)
        rotate_mlp_input(layer, R1)
        rotate_mlp_output(layer, R1)
        rotate_ov_proj(layer, num_heads, head_dim, R2=R2)
    return model

@torch.no_grad()
def rotator_model_online(
    model: LlamaForCausalLM, r1: torch.Tensor, use_pretrained: bool = False
) -> nn.Module:
    """Rotate a VLA model in place with a single global rotation ``r1``.

    Rotates the language model's embeddings and head plus the multimodal
    projector output; unless ``use_pretrained`` is set, also rotates every
    transformer layer's attention and MLP weights.

    Args:
        model: Wrapper model exposing ``language_model`` and ``projector``.
        r1: Global rotation matrix (expected orthogonal — applied as-is).
        use_pretrained: If True, skip per-layer rotation (weights already
            rotated in the pretrained checkpoint).

    Returns:
        The rotated model, moved to CUDA.
    """
    rotate_embeddings(model.language_model, r1)
    rotate_head(model.language_model, r1)
    rotate_proj(model, r1)
    cleanup_memory()
    if not use_pretrained:
        layers = model.language_model.model.layers
        for layer in logger.tqdm(layers, unit="layer", desc="Rotating"):
            rotate_attention_inputs(layer, r1)
            rotate_attention_output(layer, r1)
            rotate_mlp_input(layer, r1)
            rotate_mlp_output(layer, r1)
    return model.cuda()
