import os
from dataclasses import dataclass, field
import pathlib
from typing import Dict, List
import torch

from namo.models.namo import NamoForCausalLM
from namo.utils.utils import (
    get_peft_state_maybe_zero_3,
    get_peft_state_non_lora_maybe_zero_3,
    rank0_print,
    safe_save_model_for_hf_trainer,
)

try:
    import torch_npu
    from torch_npu.contrib import transfer_to_npu
except ImportError as e:
    pass
import transformers
from namo.models.symbols import *
from namo.trainer import NamoTrainer
from namo.params import ModelArguments, DataArguments, TrainingArguments
from transformers import AutoConfig, AutoModel
from namo.models.configuration_namo import NamoConfig
from namo.utils.utils import (
    find_all_linear_names,
    find_llm_linear_names,
    print_model_params_info,
    is_main_process,
)
from namo.utils.hf_utils import (
    try_resume_conn_weights,
    get_latest_checkpoint,
    load_new_llm_weights,
)
import wandb
from loguru import logger

# Disable wandb reporting entirely (HF Trainer would otherwise try to log to it).
wandb.init(mode="disabled")

# Set inside train() from training_args.local_rank; None until then.
local_rank = None


def train(attn_implementation="flash_attention_2"):
    """Build the NAMO multimodal model, prepare data, and run training.

    The model is either resumed as a whole from a pretrained checkpoint
    (``model_args.pretrain_model_path``) or assembled from separate LLM and
    vision-encoder checkpoints. Optional 4/8-bit quantization (bitsandbytes)
    and LoRA adapters are applied before training with ``NamoTrainer``.

    Args:
        attn_implementation: attention backend name forwarded to the HF
            configs (default ``"flash_attention_2"``).
    """
    global local_rank

    parser = transformers.HfArgumentParser(
        (ModelArguments, DataArguments, TrainingArguments)
    )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    local_rank = training_args.local_rank
    compute_dtype = (
        torch.float16
        if training_args.fp16
        else (torch.bfloat16 if training_args.bf16 else torch.float32)
    )

    # Extra from_pretrained kwargs used only for 4/8-bit (bitsandbytes) loading.
    bnb_model_from_pretrained_args = {}
    if training_args.bits in [4, 8]:
        from transformers import BitsAndBytesConfig

        bnb_model_from_pretrained_args.update(
            dict(
                device_map={"": training_args.device},
                load_in_4bit=training_args.bits == 4,
                load_in_8bit=training_args.bits == 8,
                quantization_config=BitsAndBytesConfig(
                    load_in_4bit=training_args.bits == 4,
                    load_in_8bit=training_args.bits == 8,
                    llm_int8_skip_modules=["mm_projector"],
                    llm_int8_threshold=6.0,
                    llm_int8_has_fp16_weight=False,
                    bnb_4bit_compute_dtype=compute_dtype,
                    bnb_4bit_use_double_quant=training_args.double_quant,
                    bnb_4bit_quant_type=training_args.quant_type,  # {'fp4', 'nf4'}
                ),
            )
        )

    if model_args.pretrain_model_path is not None:
        # Finetune path: resume the whole multimodal model from a checkpoint.
        pretrain_model_path = get_latest_checkpoint(model_args.pretrain_model_path)
        rank0_print(f"==> finetune from pretrained whole model: {pretrain_model_path}")
        model = NamoForCausalLM.from_pretrained(
            pretrain_model_path,
            torch_dtype=compute_dtype,
        )
        rank0_print("==> pretrained model loaded.")
        if model_args.llm_model_path is not None:
            # Both paths set: swap the LLM weights inside the pretrained model.
            rank0_print(
                f"==> [WARN] you set both pretrain_model_path and llm_model_path, means you want replace the LLM part in pretrain model, be noted what will do."
            )
            new_llm_model = AutoModel.from_pretrained(
                model_args.llm_model_path,
                torch_dtype=compute_dtype,
                trust_remote_code=True,
            )
            load_new_llm_weights(model, new_llm_model)
    else:
        # Pretrain path: assemble a fresh NAMO model from LLM + vision configs.
        assert (
            model_args.llm_model_path is not None
        ), "you must set model_args.llm_model_path since pretrain_model_path is None."
        text_config = AutoConfig.from_pretrained(
            model_args.llm_model_path,
            trust_remote_code=True,
            attn_implementation=attn_implementation,
            torch_dtype=compute_dtype,
        )
        vision_config = AutoConfig.from_pretrained(
            model_args.ve_model_path,
            trust_remote_code=True,
            torch_dtype=compute_dtype,
            attn_implementation=attn_implementation,
        )
        config = NamoConfig(
            text_config=text_config,
            vision_config=vision_config,
            attn_implementation=attn_implementation,
            torch_dtype=compute_dtype,
            conn_ve_llm_type=model_args.conn_ve_llm_type,
            longest_edge=model_args.max_img_size,
            num_patches=model_args.num_patches,
            num_visual_tokens=model_args.num_visual_tokens,
            **bnb_model_from_pretrained_args,
        )
        model = NamoForCausalLM(config=config)
    # model = model.to(dtype=compute_dtype)

    rank0_print(f"==> current model dtype: {model.dtype}, set is: {compute_dtype}")
    tokenizer = model.get_namo().tokenizer

    # use_cache is incompatible with gradient checkpointing; re-enabled post-train.
    model.config.use_cache = False
    model.config.use_dft = model_args.use_dft

    if model_args.freeze_backbone:
        model.model.requires_grad_(False)
        rank0_print(f"==> LLM were freezed, will not be trained.")

    if training_args.bits in [4, 8]:
        from peft import prepare_model_for_kbit_training

        # NOTE(review): fp16 maps to float32 here (inherited pattern) — confirm
        # this asymmetry with bf16 is intended.
        model.config.torch_dtype = (
            torch.float32
            if training_args.fp16
            else (torch.bfloat16 if training_args.bf16 else torch.float32)
        )
        model = prepare_model_for_kbit_training(
            model, use_gradient_checkpointing=training_args.gradient_checkpointing
        )

    if training_args.gradient_checkpointing:
        # Inputs must require grad so checkpointed segments can backprop.
        if hasattr(model, "enable_input_require_grads"):
            try:
                model.enable_input_require_grads()
            except Exception as e:
                print(f"enable_input_require_grads: {e}")
        else:

            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

    if training_args.lora_enable:
        from peft import LoraConfig, get_peft_model

        lora_config = LoraConfig(
            r=training_args.lora_r,
            lora_alpha=training_args.lora_alpha,
            target_modules=find_llm_linear_names(model),
            lora_dropout=training_args.lora_dropout,
            bias=training_args.lora_bias,
            task_type="CAUSAL_LM",
            use_dora=training_args.use_dora,
        )
        if training_args.bits == 16:
            if training_args.bf16:
                model.to(torch.bfloat16)
            if training_args.fp16:
                model.to(torch.float16)
        rank0_print("Adding LoRA adapters...")
        model = get_peft_model(model, lora_config)

    if tokenizer.unk_token is not None:
        tokenizer.pad_token = tokenizer.unk_token

    if tokenizer.pad_token_id is None:
        rank0_print(f"tokenizer.pad_token: {tokenizer.pad_token}")
        # NOTE(review): tokenizer.encode(...) returns a *list* of ids; assigning
        # it to pad_token_id (normally an int) looks suspect in both branches —
        # confirm downstream padding handles this.
        if "mistral" in model_args.model_name_or_path.lower():
            # important for mistral models
            tokenizer.pad_token_id = tokenizer.encode("<pad>")
        else:
            tokenizer.pad_token_id = tokenizer.encode(
                tokenizer.pad_token
                if tokenizer.pad_token is not None
                else tokenizer.eos_token
            )
        rank0_print(f"pad_token_id: {tokenizer.pad_token_id}")

    if (
        model_args.ve_model_path is not None
        or model_args.pretrain_model_path is not None
    ):
        logger.info("preparing ve model args...")
        # model.get_model().initialize_vision_modules(
        #     model_args=model_args, fsdp=training_args.fsdp
        # )
        vision_tower = model.get_vision_tower()
        vision_tower.to(
            dtype=compute_dtype,
            device=training_args.device,
        )

        model.config.unfreeze_ve = training_args.unfreeze_ve = model_args.unfreeze_ve
        if training_args.unfreeze_ve:
            for p in model.get_vision_tower().parameters():
                p.requires_grad = True

        model.config.new_img_size = model_args.new_img_size
        model.config.longest_edge = data_args.longest_edge = model_args.max_img_size
        model.config.dynamic_size = data_args.dynamic_size
        if (
            hasattr(vision_tower.image_processor, "size")
            and vision_tower.image_processor.size
        ):
            vision_tower.image_processor.size["longest_edge"] = data_args.longest_edge
        data_args.image_processor = vision_tower.image_processor
        data_args.is_multimodal = True
        # print(model_args, data_args, training_args)
        data_args.model_max_length = training_args.model_max_length

        model.config.image_aspect_ratio = data_args.image_aspect_ratio
        model.config.video_fps = data_args.video_fps
        model.config.video_frames_num = data_args.video_frames_num
        model.config.tokenizer_padding_side = tokenizer.padding_side
        model.config.tokenizer_model_max_length = tokenizer.model_max_length

        # Connector-only training: freeze everything, then unfreeze conn_ve_llm.
        model.config.tune_conn_ve_llm = training_args.tune_conn_ve_llm = (
            model_args.tune_conn_ve_llm
        )
        if model_args.tune_conn_ve_llm:
            model.requires_grad_(False)
            for p in model.get_namo().conn_ve_llm.parameters():
                p.requires_grad = True

        model.config.freeze_conn_ve_llm = training_args.freeze_conn_ve_llm
        if training_args.freeze_conn_ve_llm:
            for p in model.get_namo().conn_ve_llm.parameters():
                p.requires_grad = False

        if training_args.lora_enable:
            # LoRA runs always train the connector (overrides the freeze above).
            if is_main_process():
                logger.warning("==> LoRA enabled, will train conn_ve_llm.")
            for p in model.get_namo().conn_ve_llm.parameters():
                p.requires_grad = True

        if training_args.bits in [4, 8]:
            # Keep the connector in compute dtype; it is skipped by int8 quant
            # (llm_int8_skip_modules above).
            model.get_namo().conn_ve_llm.to(
                dtype=compute_dtype, device=training_args.device
            )

        model.config.mm_use_im_start_end = data_args.mm_use_im_start_end = (
            model_args.mm_use_im_start_end
        )
        model.config.conn_ve_llm_lr = training_args.conn_ve_llm_lr
        model.config.s2 = model_args.s2
        model.config.s2_scales = model_args.s2_scales
        model.config.s2_max_split_size = model_args.s2_max_split_size
        training_args.use_im_start_end = model_args.mm_use_im_start_end
        model.config.mm_use_im_patch_token = model_args.mm_use_im_patch_token
        model.initialize_vision_tokenizer(model_args, tokenizer=tokenizer)

    if training_args.bits in [4, 8]:
        from peft.tuners.lora import LoraLayer

        # Cast LoRA layers / norms / embeddings to stable dtypes for k-bit training.
        for name, module in model.named_modules():
            if isinstance(module, LoraLayer):
                if training_args.bf16:
                    module = module.to(torch.bfloat16)
            if "norm" in name:
                module = module.to(torch.float32)
            if "lm_head" in name or "embed_tokens" in name:
                if hasattr(module, "weight"):
                    if training_args.bf16 and module.weight.dtype == torch.float32:
                        module = module.to(torch.bfloat16)

    # Both paths may legitimately be None (exactly one model source is required
    # above), so guard the substring tests — `"x" in None` raises TypeError.
    ve_model_path = model_args.ve_model_path or ""
    pretrain_model_path_str = model_args.pretrain_model_path or ""
    if (
        "qwen2.5-vl" in ve_model_path
        or "hydra" in pretrain_model_path_str
        or "Qwen3" in pretrain_model_path_str
    ):
        from namo.dataset_qwenvl import make_supervised_data_module

        # if 'Qwen3' in model_args.llm_model_path:
        model_id = "Qwen3-VL-namo"
        if is_main_process():
            logger.warning(f"using Qwen3, model_id: {model_id}")
        # else:
        #     model_id = "Qwen2.5-VL-namo"

        if is_main_process():
            logger.info(
                f"using Qwen 2.5 VL dataset preparing module for VE accordingly..."
            )
        data_module = make_supervised_data_module(
            model_id=model_id,
            processor=model.get_vision_tower().image_processor,
            data_args=data_args,
        )
    else:
        from namo.dataset import make_supervised_data_module

        data_module = make_supervised_data_module(
            tokenizer=tokenizer, data_args=data_args, model_args=model_args
        )

    # resume conn
    if training_args.tune_conn_ve_llm:
        _ = try_resume_conn_weights(model, training_args.output_dir, "conn_ve_llm.bin")

    # resume conn for all
    if model_args.pretrain_conn_ve_llm_path is not None:
        # load conn weights
        weights_path = get_latest_checkpoint(
            model_args.pretrain_conn_ve_llm_path, "conn_ve_llm.bin"
        )
        if weights_path:
            model.namo.load_conn_ve_llm_weights(weights_path)
            rank0_print(f"conn_ve_llm weights loaded from: {weights_path}!")
        else:
            rank0_print(
                f"==> [WARN]: you have set conv_ve_llm: {model_args.pretrain_conn_ve_llm_path} but not found: {weights_path}! This means you will train from scratch for conn_ve_llm."
            )

    print_model_params_info(model)

    trainer = NamoTrainer(
        model=model, tokenizer=tokenizer, args=training_args, **data_module
    )

    if training_args.lora_enable:
        # Sanity-save the non-LoRA trainables (connector/VE) before training.
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), training_args.lora_bias
        )
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
            model.named_parameters()
        )
        if is_main_process():
            has_conn = any('conn_ve_llm' in k for k in non_lora_state_dict.keys())
            has_ve = any('ve' in k for k in non_lora_state_dict.keys())
            logger.success(f"Non-LoRA state dict contains: conn_ve_llm={has_conn}, ve={has_ve}")

        if training_args.local_rank == 0 or training_args.local_rank == -1:
            model.config.save_pretrained(training_args.output_dir)
            torch.save(
                non_lora_state_dict,
                os.path.join(training_args.output_dir, "non_lora_trainables.bin"),
            )

    # Retry training on transient CUDA OOMs, but bounded — an unconditional
    # retry loop spins forever when the OOM is deterministic.
    max_oom_retries = 3
    oom_retries = 0
    while True:
        try:
            if list(pathlib.Path(training_args.output_dir).glob("checkpoint-*")):
                trainer.train(resume_from_checkpoint=not training_args.tune_conn_ve_llm)
            else:
                trainer.train()

            if not training_args.tune_conn_ve_llm:
                trainer.save_state()
                trainer.save_model(output_dir=training_args.output_dir)
            break  # Training completed successfully, exit the loop
        except RuntimeError as e:
            if "out of memory" in str(e) and oom_retries < max_oom_retries:
                oom_retries += 1
                print("Out of memory error caught. Attempting to resume training...")
                torch.cuda.empty_cache()  # Clear the CUDA cache
            else:
                print(f"got error: {e}")
                raise

    model.config.use_cache = True

    if training_args.lora_enable:
        # Save LoRA adapters and the non-LoRA trainables separately.
        state_dict = get_peft_state_maybe_zero_3(
            model.named_parameters(), training_args.lora_bias
        )
        non_lora_state_dict = get_peft_state_non_lora_maybe_zero_3(
            model.named_parameters()
        )
        if training_args.local_rank == 0 or training_args.local_rank == -1:
            model.config.save_pretrained(training_args.output_dir)
            model.save_pretrained(training_args.output_dir, state_dict=state_dict)
            torch.save(
                non_lora_state_dict,
                os.path.join(training_args.output_dir, "non_lora_trainables.bin"),
            )
        # todo: handle if vision_lora_enable, save vision adapter and llm adapter
    else:
        safe_save_model_for_hf_trainer(
            trainer=trainer, output_dir=training_args.output_dir
        )


if __name__ == "__main__":
    # Script entry point; uses the default flash_attention_2 backend.
    train()