import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from functools import partial
from tqdm import tqdm
import logging
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime
from PIL import Image
import requests
# from transformers import CLIPModel, CLIPProcessor
from src.modeling_clip import CLIPModel
from src.configuration_clip import CLIPConfig
from src.processing_clip import CLIPProcessor
from src.modeling_siglip import SiglipModel
from src.configuration_siglip import SiglipConfig
from src.processing_siglip import SiglipProcessor
from peft import LoraConfig, get_peft_model, PeftModel
from src.dataset import MMEBDataset
from src.evaluation import evaluate_model
from utils import crop_subimage, crop_all_subimages_by_ids

from src.loss import (
    subimage_feature_matching_loss, 
    clip_contrastive_loss, 
    siglip_subimage_feature_matching_loss, 
    siglip_contrastive_loss
)
from src.training_visualizer import TrainingVisualizer
# Logging configuration: INFO level with timestamp/name/level prefix.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Use a CJK-capable font so Chinese labels render in matplotlib plots.
plt.rcParams["font.family"] = ["Microsoft YaHei"]
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font

# Batch collation helper
def collate_fn(batch, train_root_dir):
    """Collate raw dataset samples into a batch dict.

    Builds a merged "Query: ... Answer: ..." string per sample and resolves
    each query image path against *train_root_dir*. Samples lacking a query
    image, non-blank query text, non-blank positive text, or an image path
    are dropped. Returns None when nothing in the batch is usable.
    """
    def _usable(sample):
        # All four required fields must be present and non-empty.
        return (
            sample['query_image'] is not None
            and sample['query_text'].strip() != ''
            and sample['pos_text'].strip() != ''
            and bool(sample.get('query_image_path'))
        )

    kept = [s for s in batch if _usable(s)]
    if not kept:
        return None

    merged_texts = []
    for s in kept:
        # Strip the image placeholder token before merging query and answer.
        question = s['query_text'].replace('<|image_1|>', '').strip()
        merged_texts.append(f"Query: {question} Answer: {s['pos_text'].strip()}")

    return {
        'query_images': [s['query_image'] for s in kept],
        'query_ans_texts': merged_texts,
        # Resolved paths, consumed later by the sub-image matching loss.
        'query_image_path': [os.path.join(train_root_dir, s['query_image_path']) for s in kept],
        'pos_images': [s['pos_image'] for s in kept],
        'query_texts': [s['query_text'] for s in kept],
        'pos_texts': [s['pos_text'] for s in kept],
        'neg_texts': [s['neg_text'] for s in kept],
    }

def train_epoch(epoch, model, dataloader, optimizer, processor, device, use_lora):
    """Train *model* for one epoch over *dataloader* and return averaged losses.

    The total loss is ALPHA * contrastive + (1 - ALPHA) * sub-image
    feature-matching, using the module-level globals ALPHA and EPOCHS.

    Args:
        epoch: Zero-based epoch index (shown 1-based in the progress bar).
        model: CLIP (possibly PEFT-wrapped) model exposing image/text embeds
            and vision hidden states.
        dataloader: Yields collate_fn batches; a batch may be None when every
            sample in it was filtered out.
        optimizer: Optimizer over the model's trainable parameters.
        processor: CLIP processor used for both image and text inputs.
        device: Torch device the inputs are moved to.
        use_lora: Unused inside this function; kept for call-site symmetry.

    Returns:
        (avg_total_loss, avg_contrastive_loss, avg_feature_matching_loss),
        each averaged over len(dataloader), or zeros for an empty loader.
    """
    model.train()
    total_loss = 0
    total_contrastive_loss = 0
    total_feature_matching_loss = 0
    # Main progress bar: pinned at position 0 and kept on screen afterwards.
    progress_bar = tqdm(dataloader, desc=f'Epoch {epoch+1}/{EPOCHS}', position=0, leave=True, dynamic_ncols=True)

    for batch in progress_bar:
        if batch is None:
            continue
        # set_to_none=True releases gradient memory instead of zero-filling.
        optimizer.zero_grad(set_to_none=True)
        batch_loss = 0
        # Re-filter: keep only indices that have both an image and text.
        valid_indices = []
        for i in range(len(batch['query_images'])):
            has_image = batch['query_images'][i] is not None
            has_text = len(batch['query_ans_texts'][i]) > 0
            if has_image and has_text:
                valid_indices.append(i)
        if valid_indices:
            valid_query_images = [batch['query_images'][i] for i in valid_indices]
            valid_query_ans_texts = [batch['query_ans_texts'][i] for i in valid_indices]
            valid_image_paths = [batch['query_image_path'][i] for i in valid_indices]  # paths for the sub-image loss
            # Preprocess the images.
            image_inputs = processor(
                images=valid_query_images,
                return_tensors="pt",
                padding=True,
                truncation=False
            ).to(device)
            # Preprocess the merged "Query ... Answer ..." texts.
            text_inputs = processor(
                text=valid_query_ans_texts,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=256
            ).to(device)
            # Forward pass with hidden states enabled (needed below).
            with torch.autocast(device_type='cuda', enabled=False):  # mixed precision explicitly disabled
                outputs = model(
                    input_ids=text_inputs.input_ids,
                    # attention_mask=text_inputs.attention_mask,
                    pixel_values=image_inputs.pixel_values,
                    output_hidden_states=True  # expose intermediate layer outputs
                )
            # Keep only what is needed, then free the large outputs object.
            image_features = outputs.image_embeds
            text_features = outputs.text_embeds
            vision_hidden_states = outputs.vision_model_output.hidden_states
            del outputs  # release memory held by the forward outputs
            # L2-normalize embeddings before the contrastive loss.
            image_features = nn.functional.normalize(image_features, dim=-1)
            text_features = nn.functional.normalize(text_features, dim=-1)
            # Contrastive image-text alignment loss.
            contrastive_loss = clip_contrastive_loss(image_features, text_features)

            # Sub-image feature-matching loss (skipped entirely when ALPHA >= 1).
            feature_matching_loss = torch.tensor(0.0, device=device)
            if ALPHA < 1:
                feature_matching_loss = subimage_feature_matching_loss(
                    model, processor, valid_image_paths, vision_hidden_states, device
                )
            batch_loss = ALPHA * contrastive_loss + (1 - ALPHA) * feature_matching_loss
            total_contrastive_loss += contrastive_loss.item()
            total_feature_matching_loss += feature_matching_loss.item()
        # batch_loss stays the int 0 when no pair was valid; only step on a
        # real (positive) loss tensor.
        if batch_loss > 0:
            batch_loss.backward()
            optimizer.step()
            total_loss += batch_loss.item()
            progress_bar.set_postfix(
                loss=batch_loss.item(),
                contrastive=contrastive_loss.item(),
                feature_match=feature_matching_loss.item(),
            )
    avg_loss = total_loss / len(dataloader) if len(dataloader) > 0 else 0
    avg_contrastive_loss = total_contrastive_loss / len(dataloader) if len(dataloader) > 0 else 0
    avg_feature_matching_loss = total_feature_matching_loss / len(dataloader) if len(dataloader) > 0 else 0
    logger.info(f'Epoch {epoch+1} 平均损失: {avg_loss:.4f}')
    logger.info(f'Epoch {epoch+1} 对比损失: {avg_contrastive_loss:.4f} | 子图特征比对损失: {avg_feature_matching_loss:.4f}')
    return avg_loss, avg_contrastive_loss, avg_feature_matching_loss

# --------------------------
# LoRA configuration and switch
# --------------------------
USE_LORA = True  # True: train LoRA adapters; False: full fine-tuning
LORA_R = 8       # rank of the low-rank update matrices
LORA_ALPHA = 16  # LoRA scaling factor
LORA_DROPOUT = 0.05
LORA_TARGET_MODULES = ['q_proj', 'v_proj', 'k_proj', 'out_proj']  # attention projections
# --------------------------
# Global hyperparameters
# --------------------------
ALPHA = 0.5  # weight of contrastive loss vs. sub-image feature-matching loss
BATCH_SIZE = 16
EPOCHS = 30
LEARNING_RATE = 2e-4 if USE_LORA else 5e-5  # LoRA tolerates a higher learning rate
WEIGHT_DECAY = 1e-4
SAVE_INTERVAL = 30  # save a checkpoint every N epochs
VIS_INTERVAL = 1    # visualizer save cadence (epochs)
MAX_SAMPLES = 1000       # cap on training samples
MAX_EVAL_SAMPLES = 400   # cap on evaluation samples

def main():
    """Train CLIP (optionally LoRA-wrapped) on MMEB subsets.

    Side effects: writes checkpoints under models/, evaluation JSON and plots
    under outputs/, and logs progress. Driven entirely by the module-level
    hyperparameters (USE_LORA, ALPHA, BATCH_SIZE, EPOCHS, ...).
    """
    # Select compute device.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    logger.info(f'使用设备: {device}')
    logger.info(f'LoRA启用状态: {USE_LORA}')

    # Lightweight argument container consumed by MMEBDataset/evaluate_model.
    class DataArgs:
        def __init__(self):
            self.data_dir = "data/MMEB-train"
            self.eval_data_dir = "data/MMEB-train"
            self.dataset_split = "original"
            self.eval_batch_size = BATCH_SIZE
            self.image_resolution = "small"
            self.num_workers = min(os.cpu_count(), 8)
            self.max_samples = MAX_SAMPLES
            self.max_eval_samples = MAX_EVAL_SAMPLES

    data_args = DataArgs()

    # Load model and processor, preferring a local checkpoint when present.
    # (Path resolved once so model and processor always agree.)
    logger.info('加载CLIP模型和处理器...')
    clip_path = ("models/clip-vit-base-patch16-256"
                 if os.path.exists("models/clip-vit-base-patch16-256")
                 else "openai/clip-vit-base-patch16")
    base_model = CLIPModel.from_pretrained(clip_path).to(device)
    processor = CLIPProcessor.from_pretrained(clip_path)
    # Alternative SigLIP backbone, kept for reference:
    # base_model = SiglipModel.from_pretrained("models/siglip-so400m-patch14-224").to(device)
    # processor = SiglipProcessor.from_pretrained("models/siglip-so400m-patch14-224", use_fast=True)

    # Wrap with LoRA adapters, or fall back to full fine-tuning.
    if USE_LORA:
        model = setup_lora_model(base_model)
        model.print_trainable_parameters()
    else:
        model = base_model

    # Build dataset and dataloader.
    logger.info('创建数据集和数据加载器...')
    train_dataset = MMEBDataset(
        data_dir=data_args.data_dir,
        subsets=["DocVQA", "ChartQA"],  # alternatives: ["ImageNet_1K", "OK-VQA"], ["DocVQA", "InfographicsVQA", "VisDial"]
        max_samples=data_args.max_samples,
    )
    logger.info(f"数据集样本数: {len(train_dataset)}")

    collate_fn_with_root = partial(collate_fn, train_root_dir=data_args.data_dir)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=data_args.num_workers,
        collate_fn=collate_fn_with_root,
        drop_last=True,
    )

    # AdamW over all parameters; under LoRA the frozen base weights have
    # requires_grad=False, so only adapter weights are actually updated.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=LEARNING_RATE,
        weight_decay=WEIGHT_DECAY,
    )

    # Training-curve/metric visualizer.
    visualizer = TrainingVisualizer(save_interval=VIS_INTERVAL)

    # Ensure output directories exist.
    os.makedirs("outputs", exist_ok=True)
    os.makedirs("models", exist_ok=True)

    logger.info('开始训练...')
    start_epoch = 0     # raise this to resume from a saved checkpoint
    eval_interval = 1   # evaluate (and dump results) every N epochs
    for epoch in range(start_epoch, EPOCHS):
        avg_loss, avg_contrastive_loss, avg_feature_matching_loss = train_epoch(
            epoch, model, train_dataloader, optimizer, processor, device, USE_LORA
        )
        visualizer.update_train_loss(
            epoch + 1, avg_loss, avg_contrastive_loss, avg_feature_matching_loss
        )

        # Checkpoint (LoRA mode saves only the small adapter weights).
        if (epoch + 1) % SAVE_INTERVAL == 0:
            save_dir = f'models/clip-mmeb{"-lora" if USE_LORA else ""}-alpha-{ALPHA}-epoch-{epoch+1}'
            os.makedirs(save_dir, exist_ok=True)
            if USE_LORA:
                model.save_pretrained(save_dir)
                # Also keep the base model config needed to re-attach adapters.
                if not os.path.exists(os.path.join(save_dir, "base_model_config")):
                    base_model.config.save_pretrained(os.path.join(save_dir, "base_model_config"))
            else:
                # Full model + processor save.
                model.save_pretrained(save_dir)
                processor.save_pretrained(save_dir)
            logger.info(f'模型已保存至 {save_dir}')

        # Periodic evaluation. The JSON dump now lives inside this guard so
        # eval_results is always defined when written (previously the dump ran
        # unconditionally and would raise NameError if the eval cadence were
        # ever changed from every-epoch).
        if (epoch + 1) % eval_interval == 0:
            logger.info(f"开始第 {epoch + 1} 轮评估...")
            # NOTE(review): under LoRA the wrapped base model is evaluated;
            # presumably adapters stay active through the PEFT wrapper — confirm.
            eval_model = model.base_model if USE_LORA else model
            eval_results = evaluate_model(eval_model, processor, data_args)
            logger.info(f"第 {epoch + 1} 轮评估结果: {eval_results}")
            visualizer.update_eval_metrics(epoch + 1, eval_results)
            with open(f"outputs/clip_eval_results_epoch_{epoch + 1}.json", "w", encoding='utf-8') as f:
                json.dump(eval_results, f, indent=4, ensure_ascii=False)

        # Refresh plots for this epoch.
        visualizer.plot_all(epoch + 1)

    logger.info('训练完成!')

def setup_lora_model(base_model):
    """Attach LoRA adapters to *base_model* and return the PEFT model.

    Adapter hyperparameters come from the module-level LORA_* constants.
    """
    adapter_config = LoraConfig(
        r=LORA_R,
        lora_alpha=LORA_ALPHA,
        target_modules=LORA_TARGET_MODULES,
        lora_dropout=LORA_DROPOUT,
        bias="none",  # leave bias terms untouched
        task_type="FEATURE_EXTRACTION",
    )
    return get_peft_model(base_model, adapter_config)
    
if __name__ == '__main__':
    # On Windows, freeze_support() is required so that DataLoader worker
    # processes spawn correctly when the script is packaged/frozen.
    if os.name == 'nt':
        import multiprocessing
        multiprocessing.freeze_support()
    main()

