"""
文物数据集适配器 - 用于CN-CLIP原始模型测试
"""

import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import torch
from torch.utils.data import Dataset, DataLoader
from PIL import Image
import cn_clip.clip as clip
from dataset.dataset import get_index_data, build_captions, get_image
import logging
from typing import List, Tuple, Dict
import json
from pathlib import Path

logger = logging.getLogger(__name__)


class WenwuRawDataset(Dataset):
    """
    Cultural-relic dataset adapter for testing the raw CN-CLIP model.

    No data augmentation is applied; images are only passed through the
    official CN-CLIP preprocessing transform (when one is provided).
    """

    def __init__(self, start_p: float = 0.0, end_p: float = 1.0, preprocess=None):
        """
        Args:
            start_p: fractional start of the dataset slice (0.0-1.0).
            end_p: fractional end of the dataset slice (0.0-1.0).
            preprocess: CN-CLIP image preprocessing callable; when None,
                __getitem__ returns an empty tensor in place of the image.

        Raises:
            Exception: re-raised from get_index_data() when the index
                cannot be loaded (logged first).
        """
        super().__init__()
        self.preprocess = preprocess

        # Load the index data and keep corpus-level metadata alongside it.
        try:
            index_data = get_index_data()
            full_data = index_data["index"]
            self.metadata = {
                "dynasties": index_data["dynasties"],
                "categories": index_data["categories"],
                "types": index_data["types"],
                "total_images": len(index_data["images"])
            }

            # Slice the full index by fraction so disjoint splits can be
            # expressed as non-overlapping [start_p, end_p) ranges.
            start_idx = int(start_p * len(full_data))
            end_idx = int(end_p * len(full_data))
            self.data = full_data[start_idx:end_idx]

            logger.info(f"Dataset loaded: {len(self.data)} samples from {start_p:.2f} to {end_p:.2f}")
            logger.info(f"Metadata: {len(self.metadata['dynasties'])} dynasties, "
                       f"{len(self.metadata['categories'])} categories, "
                       f"{len(self.metadata['types'])} types")

        except Exception as e:
            logger.error(f"Failed to load dataset: {e}")
            raise

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, idx) -> Tuple[torch.Tensor, torch.Tensor, Dict]:
        """
        Returns:
            image_tensor: preprocessed image tensor (empty tensor when no
                preprocess callable was supplied).
            text_tensor: caption tokenized with CN-CLIP (context length 77).
            metadata: dict with the sample's original identifiers; on a
                load failure it instead holds a single "error" key so
                raw_collate_fn can drop the sample.
        """
        try:
            # Each data row is (original_id, image_path, caption, img_id).
            original_id, image_path, caption, img_id = self.data[idx]

            image = get_image(str(image_path))

            # Apply CN-CLIP preprocessing when available; otherwise emit an
            # empty placeholder tensor.
            if self.preprocess is not None:
                image_tensor = self.preprocess(image)
            else:
                image_tensor = torch.tensor([])

            # Tokenize the caption with the official CN-CLIP tokenizer.
            text_tensor = clip.tokenize([caption], context_length=77)[0]

            metadata = {
                "original_id": original_id,
                "image_id": img_id,
                "caption": caption,
                "image_path": str(image_path),
                "index": idx
            }

            return image_tensor, text_tensor, metadata

        except Exception as e:
            logger.error(f"Error loading item {idx}: {e}")
            # Return empty placeholders instead of raising so one bad sample
            # does not abort a whole evaluation run.
            return torch.tensor([]), torch.tensor([]), {"error": str(e)}

    def get_statistics(self) -> Dict:
        """Summarize caption lengths and image coverage of the loaded slice."""
        # Row layout: (original_id, image_path, caption, img_id).
        caption_lengths = [len(row[2]) for row in self.data]

        return {
            "total_samples": len(self.data),
            "avg_caption_length": sum(caption_lengths) / len(caption_lengths) if caption_lengths else 0,
            "max_caption_length": max(caption_lengths) if caption_lengths else 0,
            "min_caption_length": min(caption_lengths) if caption_lengths else 0,
            "unique_images": len(set(row[3] for row in self.data)),
            "metadata": self.metadata
        }


def create_raw_dataloader(dataset: WenwuRawDataset, 
                         batch_size: int = 32,
                         shuffle: bool = False,
                         num_workers: int = 2) -> DataLoader:
    """Build a DataLoader around *dataset* using the error-tolerant collate fn."""
    loader_kwargs = {
        "batch_size": batch_size,
        "shuffle": shuffle,
        "num_workers": num_workers,
        # Pinned host memory speeds up host-to-GPU copies; only useful when
        # a CUDA device is actually present.
        "pin_memory": torch.cuda.is_available(),
        "collate_fn": raw_collate_fn,
    }
    return DataLoader(dataset, **loader_kwargs)


def raw_collate_fn(batch):
    """
    Collate a batch while silently dropping samples that failed to load.

    A sample is considered failed when its metadata dict carries an
    "error" key; if every sample failed, empty tensors and an empty
    metadata list are returned.
    """
    valid = [sample for sample in batch if "error" not in sample[2]]

    if not valid:
        return torch.tensor([]), torch.tensor([]), []

    # Transpose the list of (image, text, metadata) triples and stack the
    # tensor components into batch dimensions.
    image_list, text_list, meta_list = zip(*valid)
    return torch.stack(image_list), torch.stack(text_list), list(meta_list)


if __name__ == "__main__":
    # Quick smoke test of the dataset adapter.
    print("Testing WenwuRawDataset...")

    # Load only a tiny slice (the first 1% of the data) for a fast check.
    ds = WenwuRawDataset(start_p=0.0, end_p=0.01)
    print(f"Dataset size: {len(ds)}")
    print("Statistics:", ds.get_statistics())

    # Inspect a single sample when the slice is non-empty.
    if len(ds) > 0:
        image, text, meta = ds[0]
        image_shape = image.shape if hasattr(image, 'shape') else 'No shape'
        text_shape = text.shape if hasattr(text, 'shape') else 'No shape'
        print(f"Sample 0 - Image shape: {image_shape}")
        print(f"Sample 0 - Text shape: {text_shape}")
        print(f"Sample 0 - Metadata: {meta}")