from collections import deque
from dataclasses import dataclass, field
from pathlib import Path
import torch.distributed as dist
import torch
from accelerate import PartialState
from transformers import (
    TrainingArguments,
    Trainer,
)
from transformers import logging as hf_logging
from transformers.trainer_callback import ProgressCallback, PrinterCallback
import rich

# import wandb


from quantize.base import QuantModule
from quantize.int_linear import QuantLinear
from quantize.silu_ewm import QuantSigmoid
from quantize.softmax import QuantSoftmax
from quantize.rmsnorm import QuantRMSNorm
from quantize.silu_ewm import QuantSigmoid, QuantFusedSiLU, QuantHadamardProduct
from quantize.int_matmul import QuantMatMul
from quantize.rope import QuantApplyRotaryPosEmb
from quantize import Quantizer
from quantize import (
    build_vla,
    patch_and_quant_model_for_train,
    load_pretrained,
    save_int_model,
    cleanup_memory,
    patch_trainable_gelu,
)
from quantize.config import QATArguments
from quantize.methods.smooth import apply_smooth_linear
from quantize.blocks import VitDiffLossWrapper, QuantVitAttn
from quantize.utils.params import disable_params, disable_module
from quantize.utils.prepare_model import compile_layers

from utils.accelerate.torch import config_torch_compile
from utils.accelerate.liger import init_liger


from utils.dataset.vla_qat_dataset import (
    prepare_dataset,
    prepare_vla_tokenizer,
    get_first_data,
)
from utils.testing.eval_uitls import eval_model
from utils.overwatch import initialize_overwatch
from quantize.utils.grads import add_gradient_hooks

# Module-wide setup, executed at import time: overwatch logger, Liger kernel
# patching (with fused loss output), and torch.compile configuration.
logger = initialize_overwatch("vla_qat")
init_liger(output_loss=True)
config_torch_compile(True)
# Groupings of non-linear quantized module types. NOTE(review): these tuples
# are currently referenced only by commented-out parameter-freezing code
# further down in this file.
quant_modules = (
    QuantMatMul,
    QuantApplyRotaryPosEmb,
    QuantFusedSiLU,
    QuantRMSNorm,
    QuantSoftmax,
)

# Quantized ops that appear inside attention blocks.
quant_attn_modules = (
    QuantMatMul,
    QuantApplyRotaryPosEmb,
    QuantSoftmax,
)
# Quantized ops that appear inside MLP blocks.
quant_mlp_modules = (QuantFusedSiLU,)


class RichProgressCallback(ProgressCallback):
    """Progress callback that draws the training bar through the overwatch
    logger's tqdm and deliberately suppresses the per-step log lines
    (printing is handled elsewhere)."""

    def on_train_begin(self, args, state, control, **kwargs):
        # Only the global rank-0 process owns a progress bar; every rank
        # still tracks the step counter.
        if state.is_world_process_zero:
            bar = logger.tqdm(total=state.max_steps, dynamic_ncols=True)
            self.training_bar = bar
        self.current_step = 0

    def on_log(self, args, state, control, logs=None, **kwargs):
        # Intentionally a no-op: suppresses ProgressCallback's log output.
        return None


class RichPrinterCallback(PrinterCallback):
    """Print each log event as one rich-formatted line on the local main process.

    Numeric metrics are rendered with 4 decimal places, the learning rate in
    scientific notation, and any non-numeric value verbatim — the original
    applied ``:.4f`` unconditionally and would crash on string-valued log
    entries (and on ``logs=None``).
    """

    @staticmethod
    def _fmt(key, value):
        # One "key=value" fragment; guard against non-numeric log entries.
        if not isinstance(value, (int, float)):
            return f"{key}={value}"
        if key == "lr":
            return f"{key}={value:.4e}"
        return f"{key}={value:.4f}"

    def on_log(self, args, state, control, logs: dict | None = None, **kwargs):
        if state.is_local_process_zero:
            msg = " | ".join(self._fmt(k, v) for k, v in (logs or {}).items())
            rich.get_console().print(msg)


class QuantTrainer(Trainer):
    """HF Trainer specialized for VLA quantization-aware training.

    Keeps sliding windows (one gradient-accumulation cycle long) of the
    per-step action-token accuracy and placeholder loss metrics so that
    `log` can report smoothed values and record the final accuracy.
    """

    def __init__(self, *args, action_tokenizer=None, diff_loss=False, **kwargs):
        super().__init__(*args, **kwargs)
        # The wrapped VLA forward does not accept HF's loss kwargs
        # (num_items_in_batch), so disable that pathway explicitly.
        self.model_accepts_loss_kwargs = False
        self.action_tokenizer = action_tokenizer
        window = self.args.gradient_accumulation_steps
        self.recent_losses = deque(maxlen=window)
        self.recent_diff_losses = deque(maxlen=window)
        self.recent_action_accuracies = deque(maxlen=window)
        self.recent_l1_losses = deque(maxlen=window)
        self.final_acc = 0.0
        self.diff_loss = diff_loss
        self.grad_norm_threshold = -1

    def compute_loss(
        self,
        model: torch.nn.Module,
        inputs,
        num_items_in_batch=None,
        return_outputs=False,
    ):
        """Forward pass plus action-accuracy bookkeeping.

        Returns the model's LM loss (and the outputs when
        ``return_outputs`` is True).
        """
        # Reset the per-step module-diff registry on the DDP-wrapped model.
        model.module.diff_dict = {}

        # Forward pass.
        outputs = model(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            pixel_values=inputs["pixel_values"].to(torch.bfloat16),
            labels=inputs["labels"],
        )

        with torch.inference_mode():
            # Compute action-token accuracy. Logits at positions [256:-1]
            # are aligned with labels[1:] for next-token prediction.
            # NOTE(review): the 256-token prefix is assumed to be the
            # vision/prompt span — confirm against the model's input layout.
            action_logits = outputs.logits[:, 256:-1].cpu()
            action_preds = action_logits.argmax(dim=2)
            action_gt = inputs["labels"].to(action_preds.device)[:, 1:]
            # Only positions holding action tokens count toward accuracy.
            mask = action_gt > self.action_tokenizer.action_token_begin_idx  # type: ignore

            correct_preds = (action_preds == action_gt) & mask
            action_accuracy = correct_preds.sum() / mask.sum()
            self.recent_action_accuracies.append(action_accuracy.item())

        # Placeholder entries keep the loss/L1 windows populated; real
        # tracking of these metrics is currently disabled.
        self.recent_losses.append(1)
        self.recent_l1_losses.append(1)

        loss = outputs.loss
        if return_outputs:
            return loss, outputs
        return loss

    def log(self, logs, *args, **kwargs):
        """Inject the smoothed action accuracy and collapse the learning
        rate into a short ``lr`` key before delegating to Trainer.log."""
        if self.recent_action_accuracies:
            acc = round(
                sum(self.recent_action_accuracies)
                / len(self.recent_action_accuracies),
                4,
            )
        else:
            # No training step has run yet (e.g. an eval-only log event);
            # the original raised ZeroDivisionError here.
            acc = self.final_acc
        logs = {
            "acc": acc,
            "lr": logs.get("learning_rate", 0.0),
        } | logs
        # Membership test (not truthiness) so a learning rate of exactly
        # 0.0 is still collapsed into the short "lr" key.
        if "learning_rate" in logs:
            del logs["learning_rate"]

        self.final_acc = logs["acc"]
        # Forward extra arguments (newer Trainer.log passes start_time)
        # instead of silently dropping them.
        super().log(logs, *args, **kwargs)


@torch.no_grad()
def eval_int(
    vla,
    vla_dataset,
    action_tokenizer,
    collator,
    first_data,
    eval_quant_fp,
    eval_quant_int,
    save_int_tensors,
):
    """Optional evaluation / export passes for the quantized model.

    Depending on the flags this runs a fake-quant (fp-simulated) eval, an
    integer-only eval, and/or dumps integer tensors for one calibration
    sample. If any flag was set, the process exits afterwards.
    """

    def _force_integer_only():
        # Switch every quantized submodule to the integer-only path.
        for module in vla.modules():  # type: ignore
            if isinstance(module, QuantModule):
                module.set_quant_state(
                    weight_quant=True, act_quant=True, integer_only=True
                )

    if eval_quant_fp:
        vla.cuda()
        eval_model(
            "quant with fp",
            vla,
            vla_dataset,
            action_tokenizer,
            collator,
            batch_size=16,
            num_batchs=24,
        )
        input()  # manual gate: pause so the operator can inspect results

    if eval_quant_int:
        _force_integer_only()
        eval_model(
            "quant with int",
            vla,
            vla_dataset,
            action_tokenizer,
            collator,
            batch_size=16,
            num_batchs=12,
        )

    if save_int_tensors:
        _force_integer_only()
        # Export using a single sample (batch dim sliced to 1).
        sample = {key: value[:1] for key, value in next(first_data).items()}
        save_int_model(vla, sample)

    if eval_quant_fp or eval_quant_int or save_int_tensors:
        exit(0)


def train(
    run_dir,
    vla,
    args,
    qat_args,
    train_dataset,
    collator,
    action_tokenizer,
    callbacks=None,
    diff_loss=False,
):
    """Run QAT training with QuantTrainer and, on rank 0, save the model's
    state dict if training ended with a smoothed accuracy above 0.5."""
    trainer = QuantTrainer(
        model=vla,
        args=args,
        train_dataset=train_dataset,
        data_collator=collator,
        action_tokenizer=action_tokenizer,
        diff_loss=diff_loss,
    )

    # Swap the stock HF console callbacks for the rich-based variants.
    for stock in (PrinterCallback, ProgressCallback):
        trainer.callback_handler.pop_callback(stock)
    for replacement in (RichProgressCallback, RichPrinterCallback):
        trainer.add_callback(replacement)
    if callbacks is not None:
        trainer.add_callback(callbacks)

    cleanup_memory()
    try:
        hf_logging.enable_progress_bar()
        trainer.train()
    finally:
        # Checkpoint from rank 0 only; low-accuracy runs are not saved.
        if dist.get_rank() == 0 and trainer.final_acc > 0.5:
            logger.info(
                f"saving model with acc={trainer.final_acc}, please wait..."
            )
            torch.save(
                vla.state_dict(),
                run_dir / f"saved_model_acc={trainer.final_acc}.pt",
            )
            logger.info("done")


def qat_main(
    task_name: str,
    run_dir: Path,
    training_args: TrainingArguments,
    qat_args: QATArguments,
    quant_args,
    other_args,
    state: PartialState | None,
) -> None:
    """End-to-end QAT pipeline.

    Builds the VLA, fuses layer norms and applies the SpinQuant R1 rotation,
    patches in quantized modules, loads a pretrained QAT checkpoint, runs the
    optional eval/export passes, and finally launches training.

    Args:
        task_name: Run identifier (currently unused in this body).
        run_dir: Output directory for saved checkpoints.
        training_args: HF TrainingArguments; optimizer fields are overridden
            to APOLLO below.
        qat_args: Model path / dataset configuration for QAT.
        quant_args: Quantizer configuration forwarded to the patcher.
        other_args: Flags: eval_fp, eval_quant_fp, eval_quant_int,
            save_int_tensors.
        state: Accelerate PartialState (unused here; kept for the caller).
    """
    processor, vla, inputs, img, action_org, vits = build_vla(
        qat_args.model_path, use_new=True
    )

    vla_dataset = prepare_dataset(
        qat_args.data_root_dir,
        qat_args.dataset_name,
        qat_args.shuffle_buffer_size,
        qat_args.image_aug,
        processor,
        vla,
        Path.cwd(),
        cache_name="dataset_new",
    )
    action_tokenizer, collator = prepare_vla_tokenizer(processor)

    # Optional full-precision baseline evaluation.
    if other_args.eval_fp:
        eval_model(
            "Fp",
            vla,
            vla_dataset,
            action_tokenizer,
            collator,
            batch_size=24,
            num_batchs=128,
        )

    # Calibration-sampling parameters shared by all get_first_data calls.
    first_bs = 64
    first_sp_rate = 0.99

    from quantize.methods.spin.fuse_ln import fuse_layer_norms
    from quantize.methods.spin.spin_blocks import random_hadamard_matrix
    from quantize.methods.spin.fuse_spin import rotator_model_online

    first_data, len_cal_dataset = get_first_data(
        vla_dataset, first_bs, sample_rate=first_sp_rate
    )

    # Fold layer-norm affine parameters into neighbouring linears so the
    # rotation below remains an exact equivalence transform.
    vla.language_model = fuse_layer_norms(vla.language_model, fuse_token_embed=False)
    # BUG FIX: the original used `==` here — a no-op comparison that
    # discarded its result instead of forcing the SDPA attention backend.
    vla.language_model.config._attn_implementation = "sdpa"
    vla.eval()

    # SpinQuant R1 rotation matrix: generated once, then cached on disk.
    r1_path = Path.cwd() / "cache" / "r1_new.pt"
    if not r1_path.exists():
        r1 = random_hadamard_matrix(vla.language_model.config.hidden_size, "cuda")
        torch.save(r1, r1_path)
    else:
        logger.info(f"use cache r1 {r1_path}")
        # NOTE(review): cache file is self-generated and trusted; loading
        # untrusted files here would require weights_only=True.
        r1 = torch.load(r1_path, map_location="cpu").cuda()
    vla = rotator_model_online(vla, r1, use_pretrained=False)

    cleanup_memory()
    first_data1, len_cal_dataset = get_first_data(
        vla_dataset, first_bs, sample_rate=first_sp_rate
    )
    first_data2, len_cal_dataset = get_first_data(
        vla_dataset, first_bs, sample_rate=first_sp_rate
    )

    # Eval/export runs toggle quant state at runtime, so per-layer
    # compilation is disabled for them (same flag reused for load_pretrained).
    eval_or_export = bool(
        other_args.eval_quant_fp
        or other_args.eval_quant_int
        or other_args.save_int_tensors
    )
    vla = patch_and_quant_model_for_train(
        vla,
        quant_args,
        linear_only=False,
        layer_compile=not eval_or_export,
        qllama=True,
        qvit1=True,
        qvit2=True,
        qproj=True,
        qact=True,
        qweight=True,
        cal_dataset=(first_data1, first_data2),
        len_cal_dataset=len_cal_dataset,
        stat_quantizer=False,
        cache_subfix="_new",
    )

    # Enable fake-quant (weight + activation, non-integer) everywhere;
    # integer-only execution is switched on later in eval_int if requested.
    for submodel in (
        vla.language_model,
        vla.vision_backbone.featurizer,
        vla.vision_backbone.fused_featurizer,
        vla.projector,
    ):
        for m in submodel.modules():  # type: ignore
            if isinstance(m, QuantModule):
                m.set_quant_state(
                    weight_quant=True, act_quant=True, integer_only=False
                )

    # TODO(review): hard-coded checkpoint path — should come from qat_args.
    base_path = Path(
        "/home/wenhongli/workspace/vla-quant/runs/vla_q+franka_kitchen-250824_10:58+b960+lr-0.001/checkpoint-750"
    )
    parts = 3
    vla = load_pretrained(
        vla,
        [
            base_path / f"pytorch_model-0000{i + 1}-of-0000{parts}.bin"
            for i in range(parts)
        ],
        use_compiled=not eval_or_export,
    )

    # Optional eval/export passes; exits the process if any flag is set.
    eval_int(
        vla,
        vla_dataset,
        action_tokenizer,
        collator,
        first_data=first_data,
        eval_quant_fp=other_args.eval_quant_fp,
        eval_quant_int=other_args.eval_quant_int,
        save_int_tensors=other_args.save_int_tensors,
    )

    # Memory-efficient APOLLO optimizer over all linear layers.
    training_args.optim = "apollo_adamw"
    training_args.optim_target_modules = "all-linear"

    cleanup_memory()

    vla.print_model_param_size()  # type: ignore
    vla.check_trainable_parameters(print_name="language_model")  # type: ignore
    # Manual gate: blocks until the operator confirms the parameter report.
    input()

    compile_layers(vla)

    train(
        run_dir=run_dir,
        vla=vla,
        args=training_args,
        qat_args=qat_args,
        train_dataset=vla_dataset,
        collator=collator,
        action_tokenizer=action_tokenizer,
        diff_loss=False,
    )
