import os
import cv2
import numpy as np
import json
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from torchvision import transforms
from PIL import Image

class MultilingualHandwritingDataset(Dataset):
    """Handwritten-character dataset laid out as ``data_dir/<language>/<char>/*.png``.

    Images are preprocessed into MNIST-like 28x28 grayscale crops (white
    character on black background). A character<->index mapping is persisted
    to ``char_mapping.json`` in the working directory so label indices stay
    stable across runs; newly discovered characters extend the mapping.

    Args:
        data_dir (str): Root directory of the dataset.
        train (bool): If True this instance exposes the training split,
            otherwise the test split (same deterministic partition for both).
        test_size (float): Fraction of samples held out for testing, in (0, 1).
        transform: Optional torchvision transform applied to each image;
            defaults to ToTensor + Normalize((0.5,), (0.5,)), i.e. [-1, 1].
    """

    # Accepted image file extensions (matched case-insensitively).
    IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg')

    def __init__(self, data_dir='handwriting_data', train=True, test_size=0.2, transform=None):
        self.data_dir = data_dir
        self.transform = transform

        # Load any existing character->index mapping first so that indices
        # assigned in previous runs are reused, then extend it while scanning.
        self.char_to_idx = {}
        self.idx_to_char = {}
        self.load_or_create_char_mapping()

        # Collect every image path with its integer label.
        self.image_paths = []
        self.labels = []
        self._collect_samples()

        # Persist the (possibly extended) mapping for future runs.
        self.save_char_mapping()

        # Deterministic, stratified split shared by the train and test
        # instances (fixed random_state keeps the partition reproducible).
        X_train, X_test, y_train, y_test = train_test_split(
            self.image_paths, self.labels,
            test_size=test_size,
            random_state=42,
            stratify=self.labels
        )

        self.image_paths = X_train if train else X_test
        self.labels = y_train if train else y_test

        # Default transform: tensor conversion + normalization to [-1, 1].
        if self.transform is None:
            self.transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5,), (0.5,))
            ])

    def _collect_samples(self):
        """Walk ``data_dir/<language>/<char>/`` and fill image_paths/labels.

        Assigns the next free index to any character folder not already in
        ``char_to_idx``. Characters with the same folder name in different
        language folders share one label.
        """
        for lang_folder in os.listdir(self.data_dir):
            lang_path = os.path.join(self.data_dir, lang_folder)
            if not os.path.isdir(lang_path):
                continue
            for char_folder in os.listdir(lang_path):
                char_path = os.path.join(lang_path, char_folder)
                if not os.path.isdir(char_path):
                    continue
                if char_folder not in self.char_to_idx:
                    idx = len(self.char_to_idx)
                    self.char_to_idx[char_folder] = idx
                    self.idx_to_char[idx] = char_folder
                label = self.char_to_idx[char_folder]
                for img_name in os.listdir(char_path):
                    # Case-insensitive so .PNG/.JPG files are not skipped.
                    if img_name.lower().endswith(self.IMAGE_EXTENSIONS):
                        self.image_paths.append(os.path.join(char_path, img_name))
                        self.labels.append(label)

    def load_or_create_char_mapping(self):
        """Load the persisted character mapping, or start with an empty one.

        JSON object keys are always strings, so idx_to_char keys are
        converted back to int on load.
        """
        mapping_file = 'char_mapping.json'
        if os.path.exists(mapping_file):
            with open(mapping_file, 'r', encoding='utf-8') as f:
                mapping = json.load(f)
            self.char_to_idx = mapping['char_to_idx']
            self.idx_to_char = {int(k): v for k, v in mapping['idx_to_char'].items()}
        else:
            self.char_to_idx = {}
            self.idx_to_char = {}

    def save_char_mapping(self):
        """Write the current character mapping to ``char_mapping.json``."""
        mapping = {
            'char_to_idx': self.char_to_idx,
            'idx_to_char': self.idx_to_char
        }
        with open('char_mapping.json', 'w', encoding='utf-8') as f:
            json.dump(mapping, f, ensure_ascii=False, indent=2)

    def preprocess_image(self, image_path):
        """Load an image and normalize it to MNIST-style 28x28 grayscale.

        Steps: grayscale -> Otsu binarization -> crop to the largest contour
        (with 20px padding) -> resize to 28x28 -> invert if the background
        looks white, so the result is white-on-black like MNIST.

        Args:
            image_path (str): Path to the image file.

        Returns:
            numpy.ndarray: uint8 array of shape (28, 28).

        Raises:
            ValueError: If the image cannot be read.
        """
        image = cv2.imread(image_path)
        if image is None:
            raise ValueError(f"无法读取图像: {image_path}")

        # Convert color images to grayscale.
        if len(image.shape) == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Otsu thresholding picks the binarization level automatically.
        _, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # Crop to the largest external contour (assumed to be the character).
        contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            max_contour = max(contours, key=cv2.contourArea)
            x, y, w, h = cv2.boundingRect(max_contour)

            # Add a margin around the bounding box, clamped to the image.
            padding = 20
            y1 = max(y - padding, 0)
            y2 = min(y + h + padding, image.shape[0])
            x1 = max(x - padding, 0)
            x2 = min(x + w + padding, image.shape[1])
            image = image[y1:y2, x1:x2]

        image = cv2.resize(image, (28, 28))

        # Heuristic: bright top/bottom rows suggest a white background, so
        # invert to match MNIST's white-on-black convention.
        if np.mean(image[0, :]) > 127 or np.mean(image[-1, :]) > 127:
            image = 255 - image

        return image

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Return ``(transformed_image, label)`` for sample ``idx``.

        On preprocessing failure a blank 28x28 image is substituted so a
        single corrupt file does not abort training; the error is printed.
        """
        img_path = self.image_paths[idx]

        # Keep the try body minimal: only preprocessing can legitimately fail.
        try:
            image = self.preprocess_image(img_path)
        except Exception as e:
            print(f"处理图像时出错 {img_path}: {str(e)}")
            image = np.zeros((28, 28), dtype=np.uint8)

        # torchvision Compose pipelines expect a PIL image; apply the same
        # conversion on both the normal and the fallback path (the original
        # fallback skipped it, feeding a raw ndarray to the transform).
        if isinstance(self.transform, transforms.Compose):
            image = Image.fromarray(image)
        return self.transform(image), self.labels[idx]

    @property
    def num_classes(self):
        """Number of distinct character classes in the mapping."""
        return len(self.char_to_idx)

def get_data_loaders(data_dir='handwriting_data', batch_size=32, test_size=0.2, num_workers=0):
    """Build the train and test DataLoaders for the handwriting dataset.

    Args:
        data_dir (str): Root directory of the dataset.
        batch_size (int): Samples per batch.
        test_size (float): Fraction of samples reserved for the test split.
        num_workers (int): Number of worker processes for data loading.

    Returns:
        tuple: ``(train_loader, test_loader, num_classes)``.
    """
    # Instantiate both splits; each scans the same directory and applies
    # the same deterministic partition internally.
    datasets = {
        split: MultilingualHandwritingDataset(
            data_dir=data_dir,
            train=(split == 'train'),
            test_size=test_size
        )
        for split in ('train', 'test')
    }

    print(f"总类别数: {datasets['train'].num_classes}")
    print(f"训练集样本数: {len(datasets['train'])}")
    print(f"测试集样本数: {len(datasets['test'])}")

    # Only the training loader shuffles between epochs.
    loaders = {
        split: DataLoader(
            ds,
            batch_size=batch_size,
            shuffle=(split == 'train'),
            num_workers=num_workers
        )
        for split, ds in datasets.items()
    }

    return loaders['train'], loaders['test'], datasets['train'].num_classes