import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from functools import partial
from tqdm import tqdm
import logging
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime
from PIL import Image
import requests
import argparse

from src.modeling_clip import CLIPModel
from src.processing_clip import CLIPProcessor
from src.modeling_siglip import SiglipModel
from src.processing_siglip import SiglipProcessor


from peft import LoraConfig, get_peft_model, PeftModel
from src.dataset import MMEBDataset, LLaVADataset
from src.dataset_vstar import VSTARBenchDataset


from src.evaluation import evaluate_model
from utils import crop_subimage, crop_all_subimages_by_ids

from src.loss import (
    subimage_feature_matching_loss, 
    clip_contrastive_loss, 
    siglip_subimage_feature_matching_loss, 
    siglip_contrastive_loss
)
from src.training_visualizer import TrainingVisualizer

# Logging configuration
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Batch collation function
def collate_fn(batch):
    """Collate raw dataset samples into a batch dict, dropping unusable ones.

    A sample is kept only when it has a query image, a non-blank query text,
    a non-blank positive text, and a non-empty 'query_image_path' entry.
    Returns None when no sample in the batch survives the filter, so the
    training loop can skip the batch entirely.
    """
    def _usable(sample):
        # All four conditions must hold for the sample to be trainable.
        return (
            sample['query_image'] is not None
            and sample['query_text'].strip() != ''
            and sample['pos_text'].strip() != ''
            and bool(sample.get('query_image_path'))
        )

    kept = [s for s in batch if _usable(s)]
    if not kept:
        return None

    # Merge question and answer into a single supervision string per sample,
    # stripping the image placeholder token from the question.
    merged_texts = []
    for s in kept:
        question = s['query_text'].replace('<|image_1|>', '').strip()
        merged_texts.append(f"Query: {question} Answer: {s['pos_text'].strip()}")

    return {
        'query_images': [s['query_image'] for s in kept],
        'query_ans_texts': merged_texts,
        'query_image_path': [s['query_image_path'] for s in kept],
        'pos_images': [s['pos_image'] for s in kept],
        'query_texts': [s['query_text'] for s in kept],
        'pos_texts': [s['pos_text'] for s in kept],
        'neg_texts': [s['neg_text'] for s in kept],
    }

def train_epoch(epoch, model, dataloader, optimizer, processor, device, use_lora, model_type, max_text_length):
    """Run one training epoch; return (avg_loss, avg_contrastive_loss, avg_feature_matching_loss).

    Reads the module-level globals ALPHA (contrastive-loss weight; the
    sub-image feature-matching loss gets weight 1 - ALPHA) and EPOCHS (for
    the progress bar), both set by main(). Batches that collate_fn reduced
    to None are skipped; note the averages divide by len(dataloader), so
    skipped batches still count in the denominator.
    """
    model.train()
    total_loss = 0
    total_contrastive_loss = 0
    total_feature_matching_loss = 0
    progress_bar = tqdm(dataloader, desc=f'Epoch {epoch+1}/{EPOCHS}', position=0, leave=True, dynamic_ncols=True)

    for batch in progress_bar:
        if batch is None:
            continue
        optimizer.zero_grad(set_to_none=True)  # set_to_none=True releases gradient memory instead of zero-filling
        batch_loss = 0
        # Keep only the (image, merged query+answer text) pairs that are usable.
        valid_indices = []
        for i in range(len(batch['query_images'])):
            has_image = batch['query_images'][i] is not None
            has_text = len(batch['query_ans_texts'][i]) > 0
            if has_image and has_text:
                valid_indices.append(i)
        if valid_indices:
            valid_query_images = [batch['query_images'][i] for i in valid_indices]
            valid_query_ans_texts = [batch['query_ans_texts'][i] for i in valid_indices]
            valid_image_paths = [batch['query_image_path'][i] for i in valid_indices]  # paths feed the sub-image feature-matching loss
            # Preprocess images.
            image_inputs = processor(
                images=valid_query_images,
                return_tensors="pt",
                padding=True,
                truncation=False
            ).to(device)
            # Preprocess text, truncated to max_text_length tokens.
            text_inputs = processor(
                text=valid_query_ans_texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=max_text_length
            ).to(device)
            # Forward pass; autocast is explicitly disabled, so this runs in
            # full precision even if an outer autocast context is active.
            with torch.autocast(device_type='cuda', enabled=False):
                outputs = model(
                    input_ids=text_inputs.input_ids,
                    # attention_mask=text_inputs.attention_mask,
                    pixel_values=image_inputs.pixel_values,
                    output_hidden_states=True  # needed for vision_model_output.hidden_states below
                )
            # Pull out what we need, then drop `outputs` to free memory early.
            image_features = outputs.image_embeds
            text_features = outputs.text_embeds
            vision_hidden_states = outputs.vision_model_output.hidden_states
            del outputs  # release the full model-output object
            # L2-normalize both embedding sets before computing losses.
            image_features = nn.functional.normalize(image_features, dim=-1)
            text_features = nn.functional.normalize(text_features, dim=-1)
            # Choose the loss functions matching the model family; the
            # feature-matching loss is only computed when its weight (1-ALPHA) is nonzero.
            if model_type == "clip":
                contrastive_loss = clip_contrastive_loss(image_features, text_features)
                feature_matching_loss = torch.tensor(0.0, device=device)
                if ALPHA < 1:
                    feature_matching_loss = subimage_feature_matching_loss(
                        model, processor, valid_image_paths, vision_hidden_states, device
                    )
            else:  # siglip
                contrastive_loss = siglip_contrastive_loss(image_features, text_features)
                feature_matching_loss = torch.tensor(0.0, device=device)
                if ALPHA < 1:
                    feature_matching_loss = siglip_subimage_feature_matching_loss(
                        model, processor, valid_image_paths, vision_hidden_states, device
                    )
            batch_loss = ALPHA * contrastive_loss + (1 - ALPHA) * feature_matching_loss
            total_contrastive_loss += contrastive_loss.item()
            total_feature_matching_loss += feature_matching_loss.item()
        # NOTE(review): the backward/step is skipped when batch_loss <= 0.
        # This guards the no-valid-samples case (batch_loss still 0), but it
        # would also silently skip updates if a loss ever came out non-positive — confirm intended.
        if batch_loss > 0:
            batch_loss.backward()
            optimizer.step()
            total_loss += batch_loss.item()
            progress_bar.set_postfix(
                loss=batch_loss.item(),
                contrastive=contrastive_loss.item(),
                feature_match=feature_matching_loss.item(),
            )
    avg_loss = total_loss / len(dataloader) if len(dataloader) > 0 else 0
    avg_contrastive_loss = total_contrastive_loss / len(dataloader) if len(dataloader) > 0 else 0
    avg_feature_matching_loss = total_feature_matching_loss / len(dataloader) if len(dataloader) > 0 else 0
    logger.info(f'Epoch {epoch+1} 平均损失: {avg_loss:.4f}')
    logger.info(f'Epoch {epoch+1} 对比损失: {avg_contrastive_loss:.4f} | 子图特征比对损失: {avg_feature_matching_loss:.4f}')
    return avg_loss, avg_contrastive_loss, avg_feature_matching_loss

def setup_lora_model(base_model, target_modules):
    """Wrap `base_model` with LoRA adapters on the given `target_modules`.

    Rank, alpha and dropout come from the module-level globals LORA_R,
    LORA_ALPHA and LORA_DROPOUT, which main() sets from the CLI arguments.
    """
    config = LoraConfig(
        r=LORA_R,
        lora_alpha=LORA_ALPHA,
        lora_dropout=LORA_DROPOUT,
        target_modules=target_modules,
        bias="none",  # leave bias terms untouched
        task_type="FEATURE_EXTRACTION",
    )
    # PEFT wraps the base model so only the adapter weights are trainable.
    return get_peft_model(base_model, config)

def parse_args():
    """Parse command-line arguments for the unified contrastive training script.

    Returns an argparse.Namespace with model selection, LoRA, training and
    data options. Boolean switches use BooleanOptionalAction so they can be
    disabled from the CLI (e.g. --no-use_lora); previously they were
    declared with action="store_true" AND default=True, which made them
    impossible to turn off.
    """
    parser = argparse.ArgumentParser(description="统一的对比学习训练脚本")
    # Model selection
    parser.add_argument("--model_type", type=str, default="clip", choices=["clip", "siglip"],
                      help="选择使用的模型类型: clip 或 siglip")

    # LoRA configuration
    # BUG FIX: store_true + default=True could never be disabled; the
    # BooleanOptionalAction keeps --use_lora and the default of True
    # backward-compatible while adding --no-use_lora.
    parser.add_argument("--use_lora", action=argparse.BooleanOptionalAction, default=True,
                      help="是否启用LoRA微调")
    parser.add_argument("--lora_r", type=int, default=8,
                      help="LoRA低秩矩阵秩")
    parser.add_argument("--lora_alpha", type=int, default=16,
                      help="LoRA alpha参数")
    parser.add_argument("--lora_dropout", type=float, default=0.05,
                      help="LoRA dropout概率")

    # Training parameters
    parser.add_argument("--alpha", type=float, default=1,
                      help="对比损失权重，子图特征比对损失权重为(1-alpha)")
    parser.add_argument("--batch_size", type=int, default=4,
                      help="训练批量大小")
    parser.add_argument("--epochs", type=int, default=20,
                      help="训练轮数")
    parser.add_argument("--learning_rate", type=float, default=None,
                      help="学习率，默认根据是否使用LoRA自动设置")
    parser.add_argument("--weight_decay", type=float, default=1e-4,
                      help="权重衰减")
    parser.add_argument("--save_interval", type=int, default=20,
                      help="模型保存间隔")
    parser.add_argument("--vis_interval", type=int, default=1,
                      help="可视化更新间隔")
    parser.add_argument("--max_samples", type=int, default=1000,
                      help="最大训练样本数")
    parser.add_argument("--max_text_length", type=int, default=256,
                      help="文本最大长度")
    # Same fix as --use_lora: allow --no-use_chinese_font.
    parser.add_argument("--use_chinese_font", action=argparse.BooleanOptionalAction, default=True,
                      help="是否使用中文字体")
    # Data parameters
    parser.add_argument("--subsets", type=str, nargs='+', default=["ImageNet_1K", "OK-VQA"], # llava
                      help="使用的数据集子集")

    return parser.parse_args()

def main():
    """Entry point: parse args, build model and data, then train/evaluate/save.

    Publishes the parsed hyperparameters as module-level globals (ALPHA,
    EPOCHS, LORA_*, ...) because train_epoch() and setup_lora_model() read
    them directly rather than taking them as parameters.
    """
    # Parse command-line arguments.
    args = parse_args()
    
    # Publish hyperparameters as globals for train_epoch / setup_lora_model.
    global ALPHA, BATCH_SIZE, EPOCHS, LEARNING_RATE, WEIGHT_DECAY, SAVE_INTERVAL, VIS_INTERVAL
    global MAX_SAMPLES, USE_LORA, LORA_R, LORA_ALPHA, LORA_DROPOUT, LORA_TARGET_MODULES
    
    MODEL_TYPE = args.model_type
    USE_LORA = args.use_lora
    LORA_R = args.lora_r
    LORA_ALPHA = args.lora_alpha
    LORA_DROPOUT = args.lora_dropout
    # Attention projection modules common to both CLIP and SigLIP.
    LORA_TARGET_MODULES = ['q_proj', 'v_proj', 'k_proj', 'out_proj']
    
    ALPHA = args.alpha
    BATCH_SIZE = args.batch_size
    EPOCHS = args.epochs
    # Default LR: higher for LoRA (2e-4) than for full fine-tuning (5e-5).
    LEARNING_RATE = args.learning_rate if args.learning_rate is not None else (2e-4 if USE_LORA else 5e-5)
    WEIGHT_DECAY = args.weight_decay
    SAVE_INTERVAL = args.save_interval
    VIS_INTERVAL = args.vis_interval
    MAX_SAMPLES = args.max_samples
    MAX_TEXT_LENGTH = args.max_text_length
    EVAL_LOCAL_ALIGN = False  # local-alignment evaluation is hard-disabled here
    
    # Configure a CJK-capable font so Chinese plot labels render correctly.
    if args.use_chinese_font:
        plt.rcParams["font.family"] = ["Microsoft YaHei"]
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with non-ASCII fonts
    
    # Select device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f'使用设备: {device}')
    logger.info(f'模型类型: {MODEL_TYPE}')
    logger.info(f'LoRA启用状态: {USE_LORA}')
    
    # Lightweight settings container passed to evaluate_model.
    class DataArgs:
        def __init__(self):
            self.dataset_split = "original"
            self.eval_batch_size = BATCH_SIZE
            self.image_resolution = "small"
            self.num_workers = min(os.cpu_count(), 8)
            self.max_samples = MAX_SAMPLES
            # max_eval_samples attribute was removed:
            # self.max_eval_samples = MAX_EVAL_SAMPLES
    
    data_args = DataArgs()
    
    # Load the model and processor for the chosen family, preferring a local
    # checkpoint directory over the hub name when it exists.
    logger.info(f'加载{MODEL_TYPE.upper()}模型和处理器...')
    if MODEL_TYPE == "clip":
        base_model = CLIPModel.from_pretrained(
            "models/clip-vit-base-patch16-256" if os.path.exists("models/clip-vit-base-patch16-256") 
            else "openai/clip-vit-base-patch16"
        ).to(device)
        processor = CLIPProcessor.from_pretrained(
            "models/clip-vit-base-patch16-256" if os.path.exists("models/clip-vit-base-patch16-256")
            else "openai/clip-vit-base-patch16"
        )
    else:  # siglip
        base_model = SiglipModel.from_pretrained("models/siglip-so400m-patch14-224").to(device)
        processor = SiglipProcessor.from_pretrained("models/siglip-so400m-patch14-224", use_fast=True)
    
    # Optionally wrap the base model with LoRA adapters.
    if USE_LORA:
        model = setup_lora_model(base_model, LORA_TARGET_MODULES)
        model.print_trainable_parameters()
    else:
        model = base_model  # full fine-tuning mode
    
    # Build the dataset; 'vstar' / 'llava' subset names select dedicated datasets.
    logger.info('创建数据集和数据加载器...')
    if "vstar" in args.subsets:
        full_dataset = VSTARBenchDataset(
            max_samples=data_args.max_samples,
        )
    elif "llava" in args.subsets:
        full_dataset = LLaVADataset(
            max_samples=data_args.max_samples,
        )
    else:
        full_dataset = MMEBDataset(
            subsets=args.subsets,
            max_samples=data_args.max_samples,
        )
    
    # Random 80/20 train/eval split.
    train_size = int(0.8 * len(full_dataset))
    eval_size = len(full_dataset) - train_size
    train_dataset, eval_dataset = torch.utils.data.random_split(full_dataset, [train_size, eval_size])
    
    logger.info(f"数据集总样本数: {len(full_dataset)}, 训练集样本数: {len(train_dataset)}, 评估集样本数: {len(eval_dataset)}")
    
    # NOTE(review): partial() with no bound arguments is a no-op wrapper here;
    # collate_fn could be passed directly.
    collate_fn_with_root = partial(collate_fn)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=data_args.num_workers,
        collate_fn=collate_fn_with_root,
        drop_last=True
    )
    
    # Optimizer (intended to update only the trainable parameters).
    optimizer = optim.AdamW(
        model.parameters(),
        lr=LEARNING_RATE,
        weight_decay=WEIGHT_DECAY
    )
    
    # Training-curve visualizer.
    visualizer = TrainingVisualizer(save_interval=VIS_INTERVAL)
    
    # Output directories.
    os.makedirs("outputs", exist_ok=True)
    os.makedirs("models", exist_ok=True)
    
    # Training loop.
    logger.info('开始训练...')
    start_epoch = 0  # NOTE(review): a stale comment claimed "resume from epoch 30", but training always starts at 0
    for epoch in range(start_epoch, EPOCHS):
        avg_loss, avg_contrastive_loss, avg_feature_matching_loss = train_epoch(
            epoch, model, train_dataloader, optimizer, processor, device, USE_LORA, MODEL_TYPE, MAX_TEXT_LENGTH
        )
        visualizer.update_train_loss(
            epoch + 1, avg_loss, avg_contrastive_loss, avg_feature_matching_loss
        )
        
        # Checkpointing (LoRA mode saves only the small adapter weights).
        if (epoch + 1) % SAVE_INTERVAL == 0:
            save_dir = f'models/{MODEL_TYPE}{"-lora" if USE_LORA else ""}-alpha-{ALPHA}-epoch-{epoch+1}'
            os.makedirs(save_dir, exist_ok=True)
            
            if USE_LORA:
                # Save the LoRA adapter (small footprint)...
                model.save_pretrained(save_dir)
                # ...plus the base model config needed when reloading.
                if not os.path.exists(os.path.join(save_dir, "base_model_config")):
                    base_model.config.save_pretrained(os.path.join(save_dir, "base_model_config"))
            else:
                # Save the full model and its processor.
                model.save_pretrained(save_dir)
                processor.save_pretrained(save_dir)
                
            logger.info(f'模型已保存至 {save_dir}')
        
        # Evaluation. NOTE(review): (epoch + 1) % 1 == 0 is always true, so
        # this runs every epoch; the guard is redundant.
        if (epoch + 1) % 1 == 0:
            logger.info(f"开始第 {epoch + 1} 轮评估...")
            # In LoRA mode, evaluate through model.base_model (original note
            # claimed adapters are merged automatically — confirm against PEFT behavior).
            eval_model = model.base_model if USE_LORA else model
            eval_results = evaluate_model(eval_model, processor, data_args, dataset=eval_dataset, enable_local_alignment=EVAL_LOCAL_ALIGN)
            logger.info(f"第 {epoch + 1} 轮评估结果: {eval_results}")
            visualizer.update_eval_metrics(epoch + 1, eval_results)
        
        # Refresh all plots.
        visualizer.plot_all(epoch + 1)
        
        # Persist this epoch's evaluation results as JSON.
        # NOTE(review): eval_results is only defined because the eval guard
        # above always fires; making that guard conditional would break this.
        output_filename = f"outputs/{MODEL_TYPE}_eval_results_epoch_{epoch + 1}.json"
        with open(output_filename, "w", encoding='utf-8') as f:
            json.dump(eval_results, f, indent=4, ensure_ascii=False)
    
    logger.info('训练完成!')
    
if __name__ == '__main__':
    # On Windows, freeze_support() must be called before spawning DataLoader
    # worker processes (multiprocessing uses spawn there).
    if os.name == 'nt':
        import multiprocessing
        multiprocessing.freeze_support()
    main()