import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import numpy as np
from PIL import Image
import json
from transformers import AutoTokenizer, AutoModel
import math
from tqdm import tqdm
import random
import logging

# Logging setup: INFO level with a timestamped "name - level - message" format.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Seed every RNG this script touches so runs are reproducible.
def set_seed(seed):
    """Seed Python, NumPy, and Torch (CPU and all CUDA devices) RNGs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)

set_seed(42)

class ColPaliConfig:
    """Configuration for ColPali: model architecture plus training settings.

    The original ``__init__`` did not accept the training keywords that
    ``main()`` passes (``batch_size``, ``num_workers``, ``weight_decay``,
    ``max_grad_norm``, ``save_every``, ``output_dir``), which raised
    ``TypeError`` at startup. They are now explicit parameters with the same
    defaults ``main()`` used, which is backward compatible for all callers.
    """
    def __init__(
        self,
        image_size=224,
        patch_size=16,
        hidden_size=768,
        num_attention_heads=12,
        num_hidden_layers=12,
        mlp_dim=3072,
        max_text_length=512,
        projection_dim=128,
        dropout=0.1,
        vision_model_name="google/vit-base-patch16-224",
        text_model_name="bert-base-uncased",
        pretrained=True,
        device=None,
        batch_size=8,
        num_workers=4,
        weight_decay=0.01,
        max_grad_norm=1.0,
        save_every=1,
        output_dir="colpali_checkpoints",
    ):
        # --- encoder / architecture settings ---
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.mlp_dim = mlp_dim
        self.max_text_length = max_text_length
        self.projection_dim = projection_dim
        self.dropout = dropout
        self.vision_model_name = vision_model_name
        self.text_model_name = text_model_name
        self.pretrained = pretrained
        # Resolve the device per-instance; the original default evaluated
        # torch.cuda.is_available() only once, at class-definition time.
        self.device = device if device is not None else (
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        # --- training-loop settings (read by train_colpali / main) ---
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.weight_decay = weight_decay
        self.max_grad_norm = max_grad_norm
        self.save_every = save_every
        self.output_dir = output_dir

class ImageEncoder(nn.Module):
    """Image encoder: a Vision Transformer plus a linear projection head.

    Emits one projected, layer-normalized embedding per ViT output token,
    mapped into the shared image/text space of size config.projection_dim.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        # Either load pretrained ViT weights or build a fresh model from the
        # dimensions declared in the config.
        from transformers import ViTModel
        if config.pretrained:
            self.vit = ViTModel.from_pretrained(config.vision_model_name)
        else:
            from transformers import ViTConfig
            self.vit = ViTModel(
                ViTConfig(
                    hidden_size=config.hidden_size,
                    num_hidden_layers=config.num_hidden_layers,
                    num_attention_heads=config.num_attention_heads,
                    intermediate_size=config.mlp_dim,
                    patch_size=config.patch_size,
                    image_size=config.image_size,
                )
            )

        # Map each ViT token representation into the shared embedding space.
        self.projection = nn.Linear(self.vit.config.hidden_size, config.projection_dim)
        self.norm = nn.LayerNorm(config.projection_dim)

    def forward(self, pixel_values):
        # Per-token hidden states: (batch, sequence_length, vit_hidden).
        token_states = self.vit(pixel_values=pixel_values).last_hidden_state
        # Project and layer-normalize every token embedding.
        return self.norm(self.projection(token_states))

class TextEncoder(nn.Module):
    """Text encoder: a BERT-style transformer plus a linear projection head.

    Emits one projected, layer-normalized embedding per input token, mapped
    into the shared image/text space of size config.projection_dim.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config

        if config.pretrained:
            # Pretrained weights from the model hub.
            self.text_model = AutoModel.from_pretrained(config.text_model_name)
        else:
            # Same architecture, randomly initialized.
            from transformers import AutoConfig
            self.text_model = AutoModel.from_config(
                AutoConfig.from_pretrained(config.text_model_name)
            )

        # Map each token representation into the shared embedding space.
        self.projection = nn.Linear(self.text_model.config.hidden_size, config.projection_dim)
        self.norm = nn.LayerNorm(config.projection_dim)

    def forward(self, input_ids, attention_mask):
        # Per-token hidden states: (batch, sequence_length, text_hidden).
        token_states = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
        ).last_hidden_state
        # Project and layer-normalize every token embedding.
        return self.norm(self.projection(token_states))

class ColPali(nn.Module):
    """Late-interaction model pairing an image encoder with a text encoder."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.image_encoder = ImageEncoder(config)
        self.text_encoder = TextEncoder(config)
        # Learnable temperature that scales the similarity logits.
        self.temperature = nn.Parameter(torch.tensor(1.0))

    def forward(self, pixel_values, input_ids, attention_mask):
        """Encode both modalities and return their projected features."""
        return {
            "image_features": self.image_encoder(pixel_values),
            "text_features": self.text_encoder(input_ids, attention_mask),
            "temperature": self.temperature,
        }

    def compute_similarity(self, image_features, text_features):
        """Cosine-similarity logits between every patch and every token.

        Args:
            image_features: (batch, num_patches, projection_dim)
            text_features: (batch, num_tokens, projection_dim)

        Returns:
            (batch, num_patches, num_tokens) similarities scaled by
            exp(temperature).
        """
        img = F.normalize(image_features, p=2, dim=-1)
        txt = F.normalize(text_features, p=2, dim=-1)
        # Batched patch-by-token dot products of unit vectors.
        logits = torch.bmm(img, txt.transpose(1, 2))
        return logits * torch.exp(self.temperature)

class DocumentDataset(Dataset):
    """Paired image/text dataset for ColPali training.

    Expects ``data_dir`` to contain an ``images/`` directory and a ``texts/``
    directory in which every image file has a same-named ``.txt`` transcript;
    images without a transcript are skipped.
    """

    def __init__(self, data_dir, tokenizer, max_text_length=512, image_size=224):
        self.data_dir = data_dir
        self.tokenizer = tokenizer
        self.max_text_length = max_text_length

        # Resize + ImageNet mean/std normalization for the ViT backbone.
        self.image_transform = transforms.Compose([
            transforms.Resize((image_size, image_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        self.data = self._load_data()

    def _load_data(self):
        """Collect {image_path, text} records for files present on both sides."""
        images_dir = os.path.join(self.data_dir, "images")
        texts_dir = os.path.join(self.data_dir, "texts")

        records = []
        for image_file in os.listdir(images_dir):
            # Only common raster formats are considered.
            if not image_file.endswith(('.jpg', '.png', '.jpeg')):
                continue
            stem = os.path.splitext(image_file)[0]
            text_path = os.path.join(texts_dir, f"{stem}.txt")
            # Skip images that have no matching transcript file.
            if not os.path.exists(text_path):
                continue
            with open(text_path, 'r', encoding='utf-8') as handle:
                transcript = handle.read()
            records.append({
                "image_path": os.path.join(images_dir, image_file),
                "text": transcript,
            })
        return records

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = self.data[idx]

        # Image -> normalized float tensor.
        pil_image = Image.open(sample["image_path"]).convert("RGB")
        pixel_values = self.image_transform(pil_image)

        # Text -> fixed-length token ids; squeeze away the batch axis that
        # return_tensors="pt" adds so DataLoader can stack samples.
        encoded = self.tokenizer(
            sample["text"],
            truncation=True,
            max_length=self.max_text_length,
            padding="max_length",
            return_tensors="pt",
        )
        return {
            "pixel_values": pixel_values,
            "input_ids": encoded["input_ids"].squeeze(0),
            "attention_mask": encoded["attention_mask"].squeeze(0),
        }

def compute_colbert_loss(similarity_matrix):
    """Symmetric negative-logsumexp loss over the patch/token similarities.

    Args:
        similarity_matrix: (batch, num_patches, num_tokens) similarity logits.

    Returns:
        Scalar tensor: the mean of -logsumexp over tokens (image -> text)
        averaged with the mean of -logsumexp over patches (text -> image).

    NOTE(review): despite the name, this is not ColBERT's max-sim late
    interaction and it contains no in-batch negatives — the objective is
    minimized by inflating all similarities. Confirm this matches the
    intended training objective.
    """
    # Image-to-text direction: pool each patch's similarities over tokens.
    image_to_text = -torch.logsumexp(similarity_matrix, dim=2).mean()
    # Text-to-image direction: pool each token's similarities over patches.
    text_to_image = -torch.logsumexp(similarity_matrix.transpose(1, 2), dim=2).mean()

    # Symmetric average of both directions. (The unused `batch_size` local
    # from the original has been dropped.)
    return (image_to_text + text_to_image) / 2

def train_colpali(config, train_dataset, val_dataset=None, num_epochs=10, lr=1e-4):
    """Train a ColPali model with the symmetric logsumexp objective.

    Args:
        config: ColPaliConfig-like object carrying model and training settings
            (batch_size, num_workers, weight_decay, max_grad_norm, save_every,
            output_dir, device).
        train_dataset: Dataset yielding pixel_values/input_ids/attention_mask.
        val_dataset: optional validation Dataset, evaluated after each epoch.
        num_epochs: number of passes over the training data.
        lr: initial learning rate for AdamW.

    Returns:
        The trained ColPali model, left on config.device.
    """
    # Data loaders. (The original built an AutoTokenizer here that was never
    # used — dead code that forced a needless model-hub download; removed.)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers
    )

    if val_dataset:
        val_dataloader = DataLoader(
            val_dataset,
            batch_size=config.batch_size,
            shuffle=False,
            num_workers=config.num_workers
        )

    # Model, optimizer, and a cosine LR schedule over the full training run.
    model = ColPali(config).to(config.device)
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=lr,
        weight_decay=config.weight_decay
    )
    total_steps = len(train_dataloader) * num_epochs
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=total_steps
    )

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0.0

        progress_bar = tqdm(enumerate(train_dataloader), total=len(train_dataloader))
        for step, batch in progress_bar:
            # Move the batch to the training device.
            pixel_values = batch["pixel_values"].to(config.device)
            input_ids = batch["input_ids"].to(config.device)
            attention_mask = batch["attention_mask"].to(config.device)

            # Forward pass -> similarity logits -> scalar loss.
            outputs = model(pixel_values, input_ids, attention_mask)
            similarity_matrix = model.compute_similarity(
                outputs["image_features"],
                outputs["text_features"]
            )
            loss = compute_colbert_loss(similarity_matrix)

            # Backward pass with gradient clipping, then step both the
            # optimizer and the per-step LR scheduler.
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), config.max_grad_norm)
            optimizer.step()
            scheduler.step()

            # Running-average loss shown on the progress bar.
            total_loss += loss.item()
            progress_bar.set_description(f"Epoch {epoch+1}/{num_epochs}, Loss: {total_loss/(step+1):.4f}")

        avg_train_loss = total_loss / len(train_dataloader)
        logger.info(f"Epoch {epoch+1}/{num_epochs}, Average Training Loss: {avg_train_loss:.4f}")

        # Optional validation pass (no gradients).
        if val_dataset:
            model.eval()
            val_loss = 0.0

            with torch.no_grad():
                for batch in val_dataloader:
                    pixel_values = batch["pixel_values"].to(config.device)
                    input_ids = batch["input_ids"].to(config.device)
                    attention_mask = batch["attention_mask"].to(config.device)

                    outputs = model(pixel_values, input_ids, attention_mask)
                    similarity_matrix = model.compute_similarity(
                        outputs["image_features"],
                        outputs["text_features"]
                    )
                    val_loss += compute_colbert_loss(similarity_matrix).item()

            avg_val_loss = val_loss / len(val_dataloader)
            logger.info(f"Epoch {epoch+1}/{num_epochs}, Validation Loss: {avg_val_loss:.4f}")

        # Periodic checkpoint: weights plus the config that produced them.
        if (epoch + 1) % config.save_every == 0:
            save_path = os.path.join(config.output_dir, f"colpali_epoch_{epoch+1}")
            os.makedirs(save_path, exist_ok=True)

            torch.save(model.state_dict(), os.path.join(save_path, "model.pt"))
            with open(os.path.join(save_path, "config.json"), "w") as f:
                json.dump(config.__dict__, f)

            logger.info(f"Model saved to {save_path}")

    return model

def main():
    """Build the config and datasets, train ColPali, and save the result."""
    # Architecture / encoder configuration.
    config = ColPaliConfig(
        image_size=224,
        patch_size=16,
        hidden_size=768,
        num_attention_heads=12,
        num_hidden_layers=12,
        mlp_dim=3072,
        max_text_length=512,
        projection_dim=128,
        dropout=0.1,
        vision_model_name="google/vit-base-patch16-224",
        text_model_name="bert-base-uncased",
        pretrained=True
    )
    # Training-time settings read by train_colpali. Assigned as attributes
    # rather than constructor kwargs because the original
    # ColPaliConfig.__init__ did not declare them — passing them as keyword
    # arguments raised TypeError before training could start.
    config.batch_size = 8
    config.num_workers = 4
    config.weight_decay = 0.01
    config.max_grad_norm = 1.0
    config.save_every = 1
    config.output_dir = "colpali_checkpoints"

    # Checkpoint directory.
    os.makedirs(config.output_dir, exist_ok=True)

    # Tokenizer shared by the train and validation datasets.
    tokenizer = AutoTokenizer.from_pretrained(config.text_model_name)

    # Training data (placeholder path — point this at the real dataset).
    train_dataset = DocumentDataset(
        data_dir="path/to/train/data",
        tokenizer=tokenizer,
        max_text_length=config.max_text_length,
        image_size=config.image_size
    )

    # Optional validation split, used only if the directory exists.
    val_dataset = None
    if os.path.exists("path/to/val/data"):
        val_dataset = DocumentDataset(
            data_dir="path/to/val/data",
            tokenizer=tokenizer,
            max_text_length=config.max_text_length,
            image_size=config.image_size
        )

    # Train the model.
    model = train_colpali(
        config=config,
        train_dataset=train_dataset,
        val_dataset=val_dataset,
        num_epochs=10,
        lr=1e-4
    )

    # Persist the final weights alongside the config that produced them.
    save_path = os.path.join(config.output_dir, "colpali_final")
    os.makedirs(save_path, exist_ok=True)

    torch.save(model.state_dict(), os.path.join(save_path, "model.pt"))
    with open(os.path.join(save_path, "config.json"), "w") as f:
        json.dump(config.__dict__, f)

    logger.info(f"Final model saved to {save_path}")

# Run training only when this file is executed as a script.
if __name__ == "__main__":
    main()    