from pathlib import Path
from utils.overwatch import initialize_overwatch
from utils.accelerate.torch import config_torch_compile, use_spda_da2
from quantize.utils.tools import cleanup_memory
from quantize.config import QuantizationConfigArgs

import torch
import torch.distributed as dist
from torch import nn
from transformers import Trainer, TrainingArguments
from transformers.trainer_callback import PrinterCallback, ProgressCallback

from .optimizer import SGDG, create_custom_scheduler
from .fuse_ln import fuse_layer_norms
from .spin_blocks import build_spin_train_blocks

# Module-wide logger for the VLA quantization-aware-training pipeline.
logger = initialize_overwatch("vla_qat")

# Enable torch.compile support globally (project helper from utils.accelerate.torch).
config_torch_compile(True)


def get_training_args():
    """Build the fixed ``TrainingArguments`` used for spin-rotation QAT.

    Notes:
        - ``learning_rate=2`` is far above typical Euclidean LRs; presumably
          intentional for the Stiefel-manifold SGDG optimizer used by the
          caller (``train_spin``) — confirm before changing.
        - ``per_device_train_batch_size=1`` with 2-step gradient accumulation
          and no gradient checkpointing bounds activation memory.
        - ``save_safetensors=False`` because rotation weights are saved
          manually by the caller rather than via Trainer checkpoints.

    Returns:
        transformers.TrainingArguments: bf16 training config with a
        cosine-with-restarts schedule, 10k steps, and 1% warmup.
    """
    return TrainingArguments(
        bf16=True,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=2,
        learning_rate=2,
        lr_scheduler_type="cosine_with_restarts",
        gradient_checkpointing=False,
        weight_decay=0.0,
        save_safetensors=False,
        max_steps=10000,
        logging_steps=5,
        ddp_find_unused_parameters=False,
        max_grad_norm=1.0,
        warmup_ratio=0.01,
    )


def train_spin(
    model: nn.Module,
    cal_dataset,
    action_tokenizer,
    collator,
    qat_args,
    test_batch_size: int = 1,
) -> nn.Module:
    """Train SpinQuant-style rotation matrices (R1 / per-layer R2) in place.

    Pipeline:
      1. Fuse layer norms into neighboring linears (rotation-invariance prep)
         and record the fused model's loss on a small calibration batch.
      2. Wrap the language model with spin training blocks and log the loss
         again as a sanity check.
      3. Train ONLY the rotation weights with Stiefel-manifold SGD (SGDG)
         under a custom schedule, then persist the learned rotations to
         ``cache/spin_<ModelClass>.pt`` on rank zero.

    Args:
        model: VLA model exposing a LLaMA-style ``language_model`` submodule.
        cal_dataset: Calibration dataset; used for the sanity batch and as the
            Trainer's train split.
        action_tokenizer: Unused here; kept for interface compatibility.
        collator: Batch collator matching ``cal_dataset`` items.
        qat_args: Raw quantization settings forwarded to
            ``QuantizationConfigArgs``.
        test_batch_size: Number of calibration samples in the sanity batch.

    Returns:
        The mutated ``model`` is NOT returned explicitly (the function ends
        without ``return``); callers should rely on in-place mutation.
        NOTE(review): signature promises ``nn.Module`` — consider adding
        ``return model``.
    """
    training_args = get_training_args()
    # Fold LayerNorm affine params into adjacent linears so inserting
    # orthogonal rotations preserves the network function.
    model.language_model = fuse_layer_norms(model.language_model, fuse_token_embed=False)
    model.language_model.config.use_cache = False
    with torch.inference_mode():
        # Imported lazily so the kernel patch only applies when training spin.
        from liger_kernel.transformers import apply_liger_kernel_to_llama

        apply_liger_kernel_to_llama(model=model.language_model)

        # BUG FIX: original used `==` (a no-op comparison); assign SDPA here.
        model.language_model.config._attn_implementation = "sdpa"
        model.eval()
        # Build one collated batch; cast float32 tensors to the model dtype,
        # move everything else (e.g. int token ids) to the model device as-is.
        test_batch = {
            k: (
                (v.to(device=model.device, dtype=model.dtype))
                if v.dtype == torch.float32
                else v.to(model.device)
            )
            for k, v in collator(
                [cal_dataset[i] for i in range(test_batch_size)]
            ).items()
        }
        real_output = model(**test_batch)

    logger.info(f"loss after fuse: {real_output.loss}")
    model = build_spin_train_blocks(
        model, QuantizationConfigArgs(qat_args), compile_train=True
    )
    with torch.inference_mode():
        # Sanity check: the rotated model should reproduce the fused loss.
        spin_output = model(**test_batch)
        logger.info(f"loss after spin: {spin_output.loss}")
    logger.info("Test passed, model output equal after spin")
    cleanup_memory()

    # Only the rotation matrices are trainable: the global R1 plus one R2 per
    # attention layer.
    trainable_parameters = [model.language_model.R1.weight] + [
        model.language_model.model.layers[i].self_attn.R2.weight
        for i in range(model.language_model.config.num_hidden_layers)
    ]

    # Stiefel=True keeps rotations orthogonal during optimization.
    optimizer = SGDG(trainable_parameters, lr=training_args.learning_rate, stiefel=True)
    trainable_parameters_names = "\n".join(
        str(name) for name, p in model.named_parameters() if p.requires_grad
    )
    logger.info(f"Use SGDG for params:\n {trainable_parameters_names} ")
    # BUG FIX: corrected typo in log message ("Strat" -> "Start").
    logger.info("Start training spin rotators")

    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=collator,
        train_dataset=cal_dataset,
        eval_dataset=None,
        # Supply optimizer + scheduler explicitly so Trainer does not build
        # its own AdamW.
        optimizers=(
            optimizer,
            create_custom_scheduler(
                optimizer,
                training_args.max_steps,
                training_args.warmup_ratio * training_args.max_steps,
            ),
        ),
        callbacks=[ProgressCallback(), PrinterCallback()],
    )
    dist.barrier()
    cleanup_memory()
    try:
        trainer.train()
    except KeyboardInterrupt:
        logger.error("Ctrl+C pressed")
    except Exception as e:
        logger.error(f"Error {e} raised")
        raise
    finally:
        # Always persist whatever rotations were learned, even on interrupt.
        cleanup_memory()
        # NOTE(review): despite the name, this state dict is not explicitly
        # moved to CPU here — confirm tensors are offloaded before saving.
        cpu_state = trainer.model.state_dict()

        # Keep only rotation weights; strip the ".weight" suffix from keys.
        R_dict = {
            key.replace(".weight", ""): value
            for key, value in cpu_state.items()
            if "R1.weight" in key or "self_attn.R2" in key
        }
        if logger.is_rank_zero:
            cache_dir = Path(__file__).parent.parent / "cache"
            cache_dir.mkdir(exist_ok=True, parents=True)
            cache_path = cache_dir / f"spin_{model.__class__.__name__}.pt"
            torch.save(R_dict, cache_path)
        # Keep all ranks in lockstep until the save completes.
        dist.barrier()
