"""
WenwuDataset适配器 - 用于CN-CLIP模型评估
将WenwuDataset适配为CN-CLIP评估所需的格式
"""

import logging
from typing import Any, Dict, List, Tuple

import torch
from torch.utils.data import DataLoader, Dataset

import cn_clip.clip as clip

logger = logging.getLogger(__name__)


class WenwuDatasetAdapter(Dataset):
    """
    Adapter around an existing WenwuDataset.

    Exposes the (image tensor, tokenized text, metadata) triple interface
    that CN-CLIP evaluation code expects.
    """

    def __init__(self, wenwu_dataset, preprocess=None):
        """
        Args:
            wenwu_dataset: a WenwuDataset instance to wrap.
            preprocess: optional CN-CLIP preprocessing callable; when None,
                the wrapped dataset's own preprocessing is used.
        """
        self.wenwu_dataset = wenwu_dataset
        self.preprocess = preprocess
        logger.info(f"WenwuDatasetAdapter initialized with {len(wenwu_dataset)} samples")

    def __len__(self):
        return len(self.wenwu_dataset)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
        """
        Produce one sample in CN-CLIP evaluation format.

        Returns:
            image_tensor: preprocessed image tensor
            text_tensor: tokenized caption tensor
            metadata: per-sample metadata dict (image_id, caption, index)
        """
        try:
            image_tensor, caption, img_id = self.wenwu_dataset[idx]

            # NOTE(review): a custom `preprocess` is currently a no-op.
            # Re-applying it would require reloading the raw image from its
            # original path; we rely on the wrapped dataset's preprocessing.

            # Tokenize the caption with the CN-CLIP tokenizer.
            text_tensor = clip.tokenize([caption], context_length=77)[0]

            return (
                image_tensor,
                text_tensor,
                {"image_id": img_id, "caption": caption, "index": idx},
            )

        except Exception as e:
            logger.error(f"Error processing item {idx}: {e}")
            # Return dummy data so evaluation is not interrupted; the collate
            # function drops these samples via the "error" metadata key.
            return (
                torch.zeros(3, 224, 224),
                clip.tokenize([""])[0],
                {"error": str(e), "index": idx},
            )


def create_adapted_dataloader(wenwu_dataset,
                             batch_size: int = 32,
                             shuffle: bool = False,
                             num_workers: int = 2,
                             preprocess=None):
    """
    Build a DataLoader over a WenwuDataset wrapped in WenwuDatasetAdapter.

    Args:
        wenwu_dataset: WenwuDataset instance to wrap.
        batch_size: number of samples per batch.
        shuffle: whether to shuffle the data.
        num_workers: number of worker processes.
        preprocess: optional CN-CLIP preprocessing callable, forwarded to
            WenwuDatasetAdapter.

    Returns:
        torch.utils.data.DataLoader yielding (images, texts, metadata)
        batches assembled by adapted_collate_fn.
    """
    adapter = WenwuDatasetAdapter(wenwu_dataset, preprocess)

    return DataLoader(
        adapter,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        # pin_memory speeds host-to-GPU transfers; only useful when CUDA exists.
        pin_memory=torch.cuda.is_available(),
        # Custom collate: drops error samples before stacking tensors.
        collate_fn=adapted_collate_fn
    )


def adapted_collate_fn(batch):
    """
    Custom collate function.

    Drops samples flagged with an "error" metadata key, then stacks the
    remaining image and text tensors into batch tensors.
    """
    kept = [item for item in batch if "error" not in item[2]]

    if not kept:
        logger.warning("Batch contains no valid samples")
        return torch.tensor([]), torch.tensor([]), []

    images, texts, metadata = zip(*kept)
    return torch.stack(images), torch.stack(texts), list(metadata)


if __name__ == "__main__":
    # 测试适配器
    import sys
    sys.path.append("/root/WenwuClip")
    from codebase.dataset.dataset import get_test_set
    
    print("Testing WenwuDatasetAdapter...")
    
    # 创建小样本测试
    wenwu_dataset = get_test_set(data_scale=0.01)  # 小样本测试
    adapter = WenwuDatasetAdapter(wenwu_dataset)
    
    print(f"Original dataset size: {len(wenwu_dataset)}")
    print(f"Adapter size: {len(adapter)}")
    
    # 测试单个样本
    if len(adapter) > 0:
        image, text, metadata = adapter[0]
        print(f"Image shape: {image.shape}")
        print(f"Text shape: {text.shape}")  
        print(f"Metadata: {metadata}")
    
    # 测试DataLoader
    dataloader = create_adapted_dataloader(wenwu_dataset, batch_size=4)
    for batch_images, batch_texts, batch_metadata in dataloader:
        print(f"Batch - Images: {batch_images.shape}, Texts: {batch_texts.shape}")
        print(f"Batch metadata count: {len(batch_metadata)}")
        break