import sklearn  # imported first to avoid shared-library conflicts (controls load order)
import argparse
import logging
import os
import random
from functools import partial
from typing import Dict, List, Tuple
import numpy as np
import torch
from utils.augment import RandomDotGenerator, construct_description
from torch.optim import AdamW
from torch.utils.data import Dataset
from transformers import Trainer, TrainingArguments
from PIL import Image
from janus.models import MultiModalityCausalLM, VLChatProcessor

# Script-wide logging: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)
# Default number of <grid_i> tokens (overridable via --num_token).
NUM_TOKEN = 14
def parse_args():
    """Parse command-line arguments for grid-token training.

    Returns:
        argparse.Namespace with data/model paths and hyperparameters.

    Side effect: updates the module-level ``NUM_TOKEN`` to the configured
    grid-token count.
    """
    # Fix: without the ``global`` declaration the assignment below created a
    # function-local NUM_TOKEN and the module-level constant never changed.
    global NUM_TOKEN

    parser = argparse.ArgumentParser(
        description="Train Multi-modal Model with Grid Tokens")
    parser.add_argument("--data_path", type=str,
                        default="/mnt/c/DocumentWorkSpace/public_processes/APTOS/images/")
    parser.add_argument("--pretrained_model", type=str,
                        default="./pretrained/eyekowner")
    parser.add_argument("--output_dir", type=str,
                        default="./experiments/output")
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--lr", type=float, default=3e-4)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_token", type=int, default=14)
    parser.add_argument(
        "--server", "-s", action='store_true', help='在Ascend服务器端运行')
    args = parser.parse_args()
    NUM_TOKEN = args.num_token
    return args


def set_seed(seed):
    """Seed every RNG in use (python, numpy, torch, CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


class PatchedMultiModalityCausalLM(MultiModalityCausalLM):
    """Wrapper that routes multimodal batches into the underlying language model.

    When pixel values are present, image features are merged into the token
    embeddings via ``prepare_inputs_embeds``; otherwise raw token ids are
    forwarded directly.
    """

    def forward(self, input_ids=None, attention_mask=None, pixel_values=None,
                images_seq_mask=None, images_emb_mask=None, labels=None,  **kwargs):
        # Drop collator-only fields the language model does not understand.
        for unused_key in ("sft_format", "loss_mask"):
            kwargs.pop(unused_key, None)

        merged_embeds = None
        if pixel_values is not None:
            merged_embeds = self.prepare_inputs_embeds(
                input_ids=input_ids,
                pixel_values=pixel_values.to(dtype=self.dtype),
                images_seq_mask=images_seq_mask,
                images_emb_mask=images_emb_mask,
            )

        if merged_embeds is None:
            # Text-only path.
            return self.language_model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                **kwargs,
            )

        # Multimodal path: our merged embeddings take precedence over any
        # caller-supplied inputs_embeds.
        kwargs.pop("inputs_embeds", None)
        return self.language_model(
            input_ids=None,
            inputs_embeds=merged_embeds,
            attention_mask=attention_mask,
            labels=labels,
            **kwargs,
        )

    
class MultiModalDataset(Dataset):
    """Multimodal dataset producing image/question/answer triples.

    Each item overlays random colored dots on a source image (via
    ``RandomDotGenerator``) and builds a grid-token question/answer pair
    describing those dots (via ``construct_description``).
    """

    def __init__(self, data_path, num_token=14, dot_num_range=(1, 5), dot_size_range=(0.02, 0.05),
                 dot_color_list=None, style_prob_list=None):
        """
        Args:
            data_path: root directory holding images (see get_image_path_list).
            num_token: grid resolution used when describing dot positions.
            dot_num_range: (min, max) number of dots per image.
            dot_size_range: (min, max) dot size relative to the image.
            dot_color_list: allowed dot colors; defaults to 7 rainbow colors.
            style_prob_list: sampling probabilities for the 4 render styles.
                Defaults to uniform [0.25] * 4. (Fixed: was a mutable default
                argument shared across instances.)
        """
        self.image_name_list = []
        self.num_token = num_token
        img_exts = {".jpg", ".jpeg", ".png"}
        dot_color_list = dot_color_list or ['red', 'orange', 'yellow', 'green', 'cyan', 'blue', 'purple']

        # 1. Collect image file paths.
        self.get_image_path_list(data_path, img_exts)

        # 2. Random-dot generator used to overlay marker dots on each image.
        self.generator = RandomDotGenerator(
            dot_num_range, dot_size_range, dot_color_list)

        print(f"已加载 {len(self.image_name_list)} 张图片文件")

        # 3. Per-style sampling probabilities (4 styles).
        self.style_prob_list = [0.25, 0.25, 0.25, 0.25] if style_prob_list is None else style_prob_list

    def get_image_path_list(self, data_path, img_exts):
        """Populate self.image_name_list from data_path.

        Accepts either (a) a directory itself named 'images', (b) a directory
        containing an 'images' subdirectory, or (c) a directory whose
        subdirectories each contain an 'images' folder.
        """
        # rstrip is safe for empty strings (the old ``data_path[-1]`` check
        # raised IndexError on "") and also strips repeated slashes.
        data_path = data_path.rstrip('/')
        if data_path.endswith('images'):
            self._collect_images(data_path, img_exts)
            return

        images_dir = os.path.join(data_path, 'images')
        if os.path.isdir(images_dir):
            self._collect_images(images_dir, img_exts)
        else:
            # Fall back to one level of nesting: <data_path>/<subdir>/images/
            for subdir in os.listdir(data_path):
                subdir_path = os.path.join(data_path, subdir)
                if os.path.isdir(subdir_path) and 'images' in os.listdir(subdir_path):
                    self._collect_images(os.path.join(subdir_path, 'images'), img_exts)

    def _collect_images(self, directory, img_exts):
        """Append every file in *directory* whose extension is in img_exts."""
        for file in os.listdir(directory):
            if os.path.splitext(file)[1].lower() in img_exts:
                self.image_name_list.append(os.path.join(directory, file))

    def __len__(self):
        return len(self.image_name_list)

    def __getitem__(self, idx):
        """Return one training sample: dot-augmented image + Q/A pair."""
        img_path = self.image_name_list[idx]
        img = Image.open(img_path).convert("RGB")
        style = self.get_random_style()
        img_with_dots, info = self.generator.generate(img, style=style)

        # Randomly pick one of five question categories (position, proximity,
        # direction, color/shape, coverage).
        question_types = ["位置识别", "邻近判断", "方位推断", "颜色形状综合", "大区域覆盖"]
        question_type = random.choice(question_types)
        question, answer = construct_description(info['dots'], num_token=self.num_token, question_type=question_type)

        return {
            "image": img_with_dots,
            "question": question,
            "answer": answer,
            "style": style
        }

    def get_random_style(self):
        """Sample one of the 4 render styles according to style_prob_list."""
        return random.choices(population=[0, 1, 2, 3], weights=self.style_prob_list, k=1)[0]


# 自定义 Trainer
class CustomTrainer(Trainer):
    """Trainer with a custom token-level cross-entropy loss.

    The model is called WITHOUT labels, and the loss is computed here so an
    optional token-level ``loss_mask`` can be applied.
    """

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        labels = inputs.pop("labels")
        # Fix: pop the mask BEFORE the forward pass — previously it was
        # popped afterwards, so ``model(**inputs)`` received ``loss_mask``.
        mask = inputs.pop("loss_mask", None)

        outputs = model(**inputs)
        logits = outputs.get("logits")

        # Causal-LM shift: logits at position i predict the token at i + 1.
        # (Fix: the original compared logits[i] with labels[i], which trains
        # the model to reproduce the current token rather than the next one.)
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()

        loss_fct = torch.nn.CrossEntropyLoss(reduction="none")
        loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                        shift_labels.view(-1))

        if mask is not None:
            # Shift the mask the same way and average over masked tokens only.
            shift_mask = mask[..., 1:].contiguous().view(-1).to(loss.dtype)
            loss = (loss * shift_mask).sum() / shift_mask.sum()
        else:
            loss = loss.mean()

        return (loss, outputs) if return_outputs else loss

def collate_fn(batch, processor):
    """Collate dataset items into a processor-batched training dict.

    Builds a flat User/Assistant conversation list (one image placeholder per
    question), runs the multimodal processor with batchification forced, and
    mirrors the resulting input ids as next-token labels.
    """
    images = []
    conversations = []
    for item in batch:
        images.append(item["image"])
        conversations.append(
            {"role": "<|User|>", "content": "<image_placeholder>\n" + item["question"]})
        conversations.append(
            {"role": "<|Assistant|>", "content": item["answer"]})

    batch_enc = dict(processor(
        conversations=conversations, images=images, return_tensors="pt",
        force_batchify=True
    ))
    # Labels are a straight copy of the input ids (no loss mask is attached).
    batch_enc["labels"] = batch_enc["input_ids"].clone()
    return batch_enc


def add_tokens(model, processor, new_tokens):
    """Register *new_tokens* in the tokenizer and initialize their embeddings.

    The embedding matrix already has reserved rows beyond the tokenizer
    vocabulary, so no resize is performed: the new tokens occupy the next
    ``len(new_tokens)`` reserved rows. All model parameters are frozen except
    the input embeddings and the LM head, and each new token's embedding is
    initialized to the mean embedding of a fixed set of spatial-vocabulary
    token ids.

    Args:
        model: multimodal model exposing ``language_model`` with embeddings
            and an ``lm_head``.
        processor: processor exposing the tokenizer to extend.
        new_tokens: list of new token strings (e.g. ``<grid_i>``).

    Raises:
        ValueError: if the reserved embedding space cannot hold new_tokens.
    """
    tokenizer_vocab_size = len(processor.tokenizer)
    embedding_layer = model.language_model.get_input_embeddings()
    embedding_size = embedding_layer.weight.shape[0]

    # Reserved rows = embedding rows beyond the current vocabulary.
    reserved_space = embedding_size - tokenizer_vocab_size
    if reserved_space < len(new_tokens):
        raise ValueError("预留空间不足，无法容纳新增token！")

    # Grow the tokenizer only; the embedding matrix keeps its size and the
    # new ids map onto the pre-reserved rows.
    processor.tokenizer.add_tokens(new_tokens)

    # Freeze everything, then re-enable just the embeddings and the LM head.
    for param in model.parameters():
        param.requires_grad = False
    embedding_layer.weight.requires_grad = True
    model.language_model.lm_head.weight.requires_grad = True

    # Pre-computed token ids of spatial-vocabulary words ("left", "右上", ...)
    # used as the initialization anchor for the new grid tokens.
    all_token_ids = [1354, 1035, 2287, 7423, 4690, 22785, 15392, 12156, 6815, 6871, 816, 1155, 764, 21903, 6815, 816, 6815, 1155, 6871, 816, 6871, 1155, 79711, 84624, 40442, 31944, 12738]
    if not all_token_ids:
        raise ValueError("用于初始化的参考token不存在于词表中！")

    # Initialize each new token's row with the mean spatial embedding.
    # (A tautological assert on the new vocab size was removed here.)
    with torch.no_grad():
        spatial_embedding = embedding_layer.weight.data[all_token_ids].mean(dim=0)
        start_pos = tokenizer_vocab_size  # first reserved row
        embedding_layer.weight.data[start_pos:start_pos + len(new_tokens)] = spatial_embedding


def generate_loss_mask(input_ids, num_token, tokenizer):
    """Return a 0/1 mask over *input_ids* marking positions of <grid_i> tokens.

    Args:
        input_ids: tensor of token ids (any shape).
        num_token: number of grid tokens (<grid_0> .. <grid_{num_token-1}>).
        tokenizer: tokenizer providing ``convert_tokens_to_ids``.

    Returns:
        torch.long tensor of the same shape: 1 where the token is a grid
        token, 0 elsewhere.
    """
    grid_tokens = [f"<grid_{i}>" for i in range(num_token)]
    # Filter Nones defensively in case a grid token is missing from the vocab.
    grid_ids = [tid for tid in tokenizer.convert_tokens_to_ids(grid_tokens)
                if tid is not None]
    if not grid_ids:
        return torch.zeros(input_ids.shape, dtype=torch.long)

    # Vectorized membership test replaces the original per-token Python loop.
    return torch.isin(input_ids, torch.tensor(grid_ids, device=input_ids.device)).long()


def print_device_info():
    """Print a summary of available Ascend NPU devices.

    Falls back to a CPU/GPU notice when torch_npu is missing or no NPU is
    available.
    """
    try:
        import torch_npu
    except ImportError:
        torch_npu = None

    if torch_npu is None or not torch_npu.npu.is_available():
        print("Ascend NPU is not available, using CPU or GPU.")
        return

    num_npus = torch_npu.npu.device_count()
    print(f"Ascend NPU is available, number of NPUs: {num_npus}")
    for i in range(num_npus):
        print(f"NPU {i}: {torch_npu.npu.device(i)}")

    if num_npus > 1:
        print("Multiple NPU devices are being used for training.")
    else:
        print("Only one NPU device is being used.")


def main():
    """Train the grid-token multimodal model end to end.

    Pipeline: parse args -> seed RNGs -> (optional) Ascend NPU setup ->
    build dataset / processor / model -> register grid tokens -> train with
    the custom Trainer -> save model and processor.
    """
    args = parse_args()
    set_seed(args.seed)

    if args.server:  # Huawei Ascend NPU environment
        import torch_npu
        # NOTE(review): importing transfer_to_npu appears intended to redirect
        # CUDA calls to the NPU as a side effect — confirm against torch_npu docs.
        from torch_npu.contrib import transfer_to_npu
        print_device_info()

    dataset = MultiModalDataset(args.data_path, num_token=args.num_token)

    processor = VLChatProcessor.from_pretrained(args.pretrained_model)
    model = PatchedMultiModalityCausalLM.from_pretrained(
        args.pretrained_model, torch_dtype=torch.float16
    )

    # Register <grid_i> tokens; add_tokens also freezes everything except the
    # embeddings and the LM head.
    new_tokens = [f"<grid_{i}>" for i in range(args.num_token)]
    add_tokens(model, processor, new_tokens)

    # Optimize only the unfrozen parameters. This explicit optimizer takes
    # precedence over the learning_rate configured in TrainingArguments below.
    optimizer = AdamW([p for p in model.parameters() if p.requires_grad], lr=args.lr)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        num_train_epochs=args.epochs,
        per_device_train_batch_size=args.batch_size,
        gradient_accumulation_steps=4,
        learning_rate=1e-5,  # NOTE(review): superseded by the custom AdamW above
        max_grad_norm=0.5,
        save_strategy="epoch",
        logging_steps=10,
        remove_unused_columns=False,  # keep custom collator fields (image, question, ...)
        dataloader_num_workers=2,
        report_to="none",
        dataloader_drop_last=True,
        dataloader_pin_memory=False,
        ddp_find_unused_parameters=False,
    )

    trainer = CustomTrainer(
        model=model,
        args=training_args,
        train_dataset=dataset,
        data_collator=partial(collate_fn, processor=processor),
        optimizers=(optimizer, None),  # custom optimizer, default LR scheduler
    )

    logger.info("🚀 开始模型训练 ...")
    if args.server:  # Ascend NPU environment: run training under NPU autocast
        with torch_npu.npu.amp.autocast():
            trainer.train()
    else:
        # with torch.cuda.amp.autocast():
        trainer.train()

    logger.info("✅ 训练结束，正在保存模型 ...")

    trainer.save_model(args.output_dir)
    processor.save_pretrained(args.output_dir)


# Script entry point.
if __name__ == "__main__":
    main()
