from unsloth import FastVisionModel
from unsloth.trainer import UnslothVisionDataCollator
from datasets import load_dataset
import argparse
import collections
from trl import SFTTrainer, SFTConfig


def construct_textvqa_conservation(item):
    """
    Build a chat-format training example from one TextVQA record.

    Args:
        item (dict): record with keys "image", "question", and "answers"
            (a list of annotator answers for the question).

    Returns:
        dict: {"messages": [...]} — a user turn carrying the question text
        and the image, followed by an assistant turn carrying the target
        answer, in the format expected by the vision data collator.
    """
    image = item["image"]
    question = item["question"]
    # Pick the most frequent annotator answer as the supervision target.
    # (removed dead `import random` and commented-out code)
    answer = collections.Counter(item["answers"]).most_common(1)[0][0]
    messages = [
        {"role": "user", "content": [
            {"type": "text", "text": question},
            {"type": "image", "image": image},
        ]},
        {"role": "assistant", "content": [
            {"type": "text", "text": answer},
        ]},
    ]
    return {"messages": messages}


def load_model(model_id="Qwen/Qwen2.5-VL-3B-Instruct"):
    """
    Load a pretrained vision-language model with Unsloth.

    The model is loaded in 4-bit quantization with Unsloth's gradient
    checkpointing enabled to reduce memory use during finetuning.

    Args:
        model_id (str): Hugging Face model identifier.

    Returns:
        tuple: (model, tokenizer)
    """
    return FastVisionModel.from_pretrained(
        model_id,
        load_in_4bit=True,
        use_gradient_checkpointing="unsloth",
    )


def configure_lora(model):
    """
    Attach LoRA adapters to the model.

    Adapters are applied to both the vision and language towers,
    covering attention and MLP modules.

    Args:
        model: the pretrained model returned by ``load_model``.

    Returns:
        model: the PEFT-wrapped model with LoRA adapters attached.
    """
    lora_settings = dict(
        finetune_vision_layers=True,
        finetune_language_layers=True,
        finetune_attention_modules=True,
        finetune_mlp_modules=True,
        r=16,
        lora_alpha=16,
        lora_dropout=0,
        bias="none",
        random_state=3407,
        use_rslora=False,
        loftq_config=None,
    )
    return FastVisionModel.get_peft_model(model, **lora_settings)


def prepare_dataset(data_path, processor):
    """
    Load the TextVQA grounding dataset and convert it to chat format.

    Args:
        data_path (str): dataset path — currently unused; the dataset id
            below is hard-coded. TODO: honor data_path.
        processor: tokenizer/processor — currently unused; kept so the
            call sites don't change.

    Returns:
        list[dict]: chat-formatted examples ({"messages": [...]}) for the
        first 5000 training records.
    """
    # Bug fix: the original called MsDataset.load(), but MsDataset was
    # never imported (NameError at runtime). Use the already-imported
    # `datasets.load_dataset` on the same dataset id instead.
    dataset = load_dataset(
        'Tina12345/textVQA_groundingtask_bbox',
        name='default',
        split='train[:5000]',
    )
    return [construct_textvqa_conservation(item) for item in dataset]


def train_model(model, tokenizer, train_dataset, output_dir="./qwen2.5-vl-3b-lora-finetuned"):
    """
    Run supervised finetuning and save the LoRA adapter + tokenizer.

    Args:
        model: LoRA-wrapped model (output of ``configure_lora``).
        tokenizer: tokenizer/processor paired with the model.
        train_dataset: iterable of chat-formatted examples
            ({"messages": [...]}).
        output_dir (str): directory for trainer checkpoints and the final
            adapter/tokenizer.
    """
    # Switch the model into training mode (enables gradients/adapters).
    FastVisionModel.for_training(model)
    trainer = SFTTrainer(
        model = model,
        tokenizer = tokenizer,
        data_collator = UnslothVisionDataCollator(model, tokenizer), # Must use!
        train_dataset = train_dataset,
        args = SFTConfig(
            per_device_train_batch_size = 2,
            gradient_accumulation_steps = 4,
            warmup_steps = 5,
            max_steps = 30,
            # num_train_epochs = 1, # Set this instead of max_steps for full training runs
            learning_rate = 2e-4,
            logging_steps = 1,
            optim = "adamw_8bit",
            weight_decay = 0.01,
            lr_scheduler_type = "linear",
            seed = 3407,
            output_dir = output_dir,
            report_to = "none",     # For Weights and Biases

            # You MUST put the below items for vision finetuning:
            remove_unused_columns = False,
            dataset_text_field = "",
            dataset_kwargs = {"skip_prepare_dataset": True},
            max_length = 2048,
        ),
    )
    trainer.train()
    # Bug fix: previously saved to a hard-coded "lora_model" directory,
    # silently ignoring the output_dir parameter.
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)


def save_model(model, processor, output_dir="./qwen2.5-vl-3b-finetuned-merged"):
    """
    Merge LoRA weights into the base model and save the result.

    Args:
        model: LoRA-wrapped (PEFT) model to merge.
        processor: processor/tokenizer to save alongside the model.
        output_dir (str): destination directory for the merged model.
    """
    # Fold the adapter weights back into the base model so the saved
    # checkpoint is a plain, standalone model.
    merged = model.merge_and_unload()

    merged.save_pretrained(output_dir)
    processor.save_pretrained(output_dir)

    print(f"Model saved to {output_dir}")



def main():
    """CLI entry point: load model, attach LoRA, prepare data, and train."""
    parser = argparse.ArgumentParser(description="Qwen2.5-VL-3B-Instruct LoRA微调")
    parser.add_argument("--model_id", type=str, default="Qwen/Qwen2.5-VL-3B-Instruct", help="模型ID")
    parser.add_argument("--data_path", type=str, required=True, help="训练数据路径")
    parser.add_argument("--output_dir", type=str, default="./qwen2.5-vl-3b-lora-finetuned", help="输出目录")
    # NOTE(review): --epochs and --batch_size are parsed but never used —
    # training hyperparameters are hard-coded in train_model's SFTConfig.
    parser.add_argument("--epochs", type=int, default=3, help="训练轮数")
    parser.add_argument("--batch_size", type=int, default=4, help="批次大小")

    args = parser.parse_args()

    print("Loading model...")
    model, tokenizer = load_model(args.model_id)

    print("Configuring LoRA...")
    model = configure_lora(model)

    print("Preparing dataset...")
    train_dataset = prepare_dataset(args.data_path, tokenizer)

    print("Starting training...")
    # Bug fix: train_model requires (model, tokenizer, train_dataset, output_dir);
    # the original call omitted `tokenizer`, shifting all arguments and
    # raising a TypeError inside the trainer.
    train_model(model, tokenizer, train_dataset, args.output_dir)

    print("Training completed!")


if __name__ == "__main__":
    main()