# coding=utf-8
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

# This code is based on QuaRot(https://github.com/spcl/QuaRot/tree/main/quarot).
# Licensed under Apache License 2.0.

import typing
import torch
from utils.overwatch import initialize_overwatch
from quantize.utils.tools import cleanup_memory


logger = initialize_overwatch("vla_qat")


@torch.no_grad()
def fuse_ln_linear(
    layernorm: torch.nn.Module, linear_layers: typing.Iterable[torch.nn.Linear]
) -> None:
    """
    Fuse the affine parameters of ``layernorm`` into the adjacent linear blocks.

    For each linear layer ``y = W x + b`` fed by ``layernorm`` (scale ``g``,
    optional shift ``s``), rewrite it in place so that applying the linear to
    the *unscaled* normalized input is equivalent:
        ``W <- W * g`` (column-wise scale), ``b <- b + W s``.
    All arithmetic is done in float64 and cast back to the layer's dtype.

    Args:
        layernorm: Norm module with a ``weight`` tensor and an optional ``bias``.
        linear_layers: Linear layers consuming the norm's output; mutated in place.
    """
    # Hoist the loop-invariant norm parameters out of the per-linear loop.
    ln_weight = layernorm.weight.double()
    # `hasattr` alone is not enough: e.g. nn.LayerNorm(..., bias=False) keeps a
    # `bias` attribute that is None, which would crash on `.double()` below.
    ln_bias = getattr(layernorm, "bias", None)

    for linear in linear_layers:
        linear_dtype = linear.weight.dtype

        # Calculating new weight and bias
        W_ = linear.weight.data.double()
        linear.weight.data = (W_ * ln_weight).to(linear_dtype)

        if ln_bias is not None:
            if linear.bias is None:
                # Create the missing bias on the same device as the weights to
                # avoid a CPU/GPU mismatch in the accumulation below.
                linear.bias = torch.nn.Parameter(
                    torch.zeros(
                        linear.out_features, dtype=torch.float64, device=W_.device
                    )
                )
            linear.bias.data = (
                linear.bias.data.double() + torch.matmul(W_, ln_bias.double())
            ).to(linear_dtype)


def _reset_norm_weight_to_ones(norm: torch.nn.Module) -> None:
    """Replace a norm's (already-fused) scale with a constant all-ones buffer."""
    ones = torch.ones_like(norm.weight.data)
    # Re-register as a buffer (not a Parameter) so the identity scale is never
    # trained or quantized after fusion.
    delattr(norm, "weight")
    norm.register_buffer("weight", ones)


@torch.no_grad()
def fuse_layer_norms(model: torch.nn.Module, fuse_token_embed=False) -> torch.nn.Module:
    """
    Fuse every norm's affine parameters into the adjacent linear layers of a
    decoder-style LM (expects ``model.model.layers``, ``model.model.norm``,
    ``model.lm_head``), then reset each norm's weight to ones.

    Args:
        model: The model to fuse; mutated in place (also returned).
        fuse_token_embed: If True, additionally mean-center the token embedding
            rows (absorbs the norm's mean subtraction); the logits check is
            skipped in that case because outputs are not expected to match.

    Returns:
        The fused model, cast to bfloat16.
    """
    model = model.float()
    cleanup_memory()
    logger.info(
        f"Fusing layernorm {'and token embed' if fuse_token_embed else ''} for {model.__class__} with {len(model.model.layers)} layers"
    )

    # Reference forward pass (pre-fusion) for the equivalence check below.
    res, inp = run_model_once(model)

    if fuse_token_embed:
        # Mean-center each embedding row so the norm's mean subtraction is
        # absorbed into the embedding table itself.
        for W in [model.model.embed_tokens]:
            W_ = W.weight.data.double()
            W.weight.data = (W_ - W_.mean(dim=-1, keepdim=True)).to(W.weight.data.dtype)

    # Fuse the linear operations in Layernorm into the adjacent linear blocks.
    for layer in model.model.layers:
        fuse_ln_linear(
            layer.post_attention_layernorm, [layer.mlp.up_proj, layer.mlp.gate_proj]
        )
        fuse_ln_linear(
            layer.input_layernorm,
            [
                layer.self_attn.q_proj,
                layer.self_attn.k_proj,
                layer.self_attn.v_proj,
            ],
        )
        _reset_norm_weight_to_ones(layer.post_attention_layernorm)
        _reset_norm_weight_to_ones(layer.input_layernorm)

    # Final norm fuses into the LM head.
    fuse_ln_linear(
        model.model.norm,
        [model.lm_head],
    )
    _reset_norm_weight_to_ones(model.model.norm)

    logger.info("Model fused, checking outputs...")
    res_fused, inp = run_model_once(model, inp)
    if not fuse_token_embed:
        try:
            torch.testing.assert_close(
                torch.softmax(res, dim=-1, dtype=torch.float32),
                torch.softmax(res_fused, dim=-1, dtype=torch.float32),
                msg=lambda x: (logger.warning(f"Fused logits not equal:\n {x}!") or x),
            )
        except AssertionError:
            # Softmax mismatch: fall back to reporting greedy-token agreement.
            acc = (
                torch.argmax(res, dim=-1) == torch.argmax(res_fused, dim=-1)
            ).sum() / (res.shape[0] * res.shape[1])
            logger.warning(f"acc after fused = {acc}")

    model = model.bfloat16()
    cleanup_memory()
    return model


@torch.no_grad()
def run_model_once(model, fake_data: torch.Tensor | None = None):
    if fake_data is None:
        fake_data = torch.randint(
            10, 10000, size=[8, 512], device=model.device, dtype=torch.long
        )
    result = model(input_ids=fake_data).logits
    return result, fake_data
