from torch.utils.data import Dataset
import os
import torch
from torchvision import transforms
from src.configs import base_config

class FontDataset(Dataset):
    """Dataset of paired (glyph image tensor, token sequence) samples.

    Expected on-disk layout (paths come from ``base_config``):
        <base_dir>/<tensor_dir>/<font_name>/<prefix>_U+XXXX.pt  -- image tensors
        <base_dir>/<token_dir>/<font_name>/U+XXXX.pt            -- token tensors

    Each item is a dict with keys ``image`` (3xHxW float tensor in [-1, 1]),
    ``tokens`` (1-D long tensor) and ``seq_len`` (int length of ``tokens``).
    """

    def __init__(self, transform=None):
        """Scan the tensor/token directories and build the sample index.

        Args:
            transform: Optional callable applied to the final image tensor
                in ``__getitem__`` (after channel fixing and normalization).

        Raises:
            FileNotFoundError: if the tensor or token directory is missing.
        """
        super().__init__()
        self.transform = transform
        self.training_mode = False

        tensor_dir = os.path.join(base_config['data']['base_dir'],
                                base_config['data']['processed']['tensor_dir'])
        token_dir = os.path.join(base_config['data']['base_dir'],
                               base_config['data']['processed']['token_dir'])

        if not os.path.exists(tensor_dir):
            raise FileNotFoundError(f"Tensor目录不存在: {tensor_dir}")
        if not os.path.exists(token_dir):
            raise FileNotFoundError(f"Token目录不存在: {token_dir}")

        self.samples = []

        # Walk the tensor tree and pair every glyph tensor with its token file.
        for font_name in os.listdir(tensor_dir):
            font_tensor_path = os.path.join(tensor_dir, font_name)
            font_token_path = os.path.join(token_dir, font_name)

            # Skip hidden entries, non-directories, and fonts that have no
            # matching token directory.  (The original code checked
            # isdir(font_tensor_path) twice; one check suffices.)
            if (font_name.startswith('.')
                    or not os.path.isdir(font_tensor_path)
                    or not os.path.exists(font_token_path)):
                continue

            for char_file in os.listdir(font_tensor_path):
                # Only process visible .pt files.
                if not char_file.endswith('.pt') or char_file.startswith('.'):
                    continue

                # Tensor files are named "<prefix>_U+XXXX.pt"; the token
                # directory uses just "U+XXXX.pt", so keep the last '_' part.
                token_file = char_file.split('_')[-1]
                token_path = os.path.join(font_token_path, token_file)

                if os.path.exists(token_path):
                    self.samples.append((
                        os.path.join(font_tensor_path, char_file),
                        token_path
                    ))
                else:
                    print(f"警告: 缺少token文件 {token_path}")

        # Debug output: show the resolved paths and font-directory count.
        print(f"实际加载路径:")
        print(f"- 张量目录: {tensor_dir}")
        print(f"- Token目录: {token_dir}")
        print(f"字体子目录数量: {len(os.listdir(tensor_dir))}")

    def __len__(self):
        """Number of (image, token) pairs discovered at init time."""
        return len(self.samples)

    def __getitem__(self, idx):
        """Load sample *idx* and return {'image', 'tokens', 'seq_len'}."""
        tensor_path, token_path = self.samples[idx]

        # Fix channel count and scale to [0, 1] via the shared helper
        # (previously this logic was duplicated here and the helper unused).
        image = self._preprocess_image(torch.load(tensor_path).float())

        # FIX: augmentation was defined but never invoked, so
        # enable_training_mode() was a no-op.  Augment in [0, 1] space
        # because ColorJitter expects non-negative input.
        if self.training_mode:
            image = self._augment_image(image)

        image = (image - 0.5) * 2  # normalize to [-1, 1]

        # FIX: the user-supplied transform was previously ignored.
        if self.transform is not None:
            image = self.transform(image)

        tokens = torch.load(token_path).long()

        return {
            'image': image,
            'tokens': tokens,
            'seq_len': len(tokens)  # original (unpadded) sequence length
        }

    def _preprocess_image(self, img_tensor):
        """Normalize channels (grayscale->RGB, RGBA->RGB) and scale to [0, 1]."""
        if img_tensor.shape[0] == 1:      # single channel -> replicate to RGB
            img_tensor = img_tensor.repeat(3, 1, 1)
        elif img_tensor.shape[0] == 4:    # RGBA -> drop the alpha channel
            img_tensor = img_tensor[:3]

        # Tensors stored as 0-255 are rescaled; [0, 1] tensors pass through.
        return img_tensor / 255.0 if img_tensor.max() > 1.0 else img_tensor

    def _augment_image(self, img):
        """Training-time data augmentation (active only in training mode)."""
        aug = transforms.Compose([
            transforms.RandomAffine(degrees=5, translate=(0.02, 0.02)),
            transforms.ColorJitter(brightness=0.2, contrast=0.2)
        ])
        return aug(img)

    def enable_training_mode(self, enabled=True):
        """Toggle training mode (enables data augmentation in __getitem__)."""
        self.training_mode = enabled


def collate_fn(batch):
    """Collate dataset items into a batch, zero-padding token sequences.

    Args:
        batch: list of dicts with keys 'image', 'tokens', 'seq_len'.

    Returns:
        Dict with a stacked 'image' tensor, a (B, max_len) long 'tokens'
        tensor padded with zeros, and a 1-D 'seq_len' tensor of originals.
    """
    images = torch.stack([sample['image'] for sample in batch])
    lengths = [sample['seq_len'] for sample in batch]
    longest = max(lengths)

    # Pre-allocate the padded matrix and copy each sequence into its row;
    # positions past the original length stay zero (the pad token).
    padded = torch.zeros(len(batch), longest, dtype=torch.long)
    for row, sample in enumerate(batch):
        padded[row, :sample['seq_len']] = sample['tokens']

    return {
        'image': images,
        'tokens': padded,
        'seq_len': torch.tensor(lengths)
    }