import cv2
import numpy as np
import os.path as osp
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import random

class TID2013RankDataset(Dataset):
    """TID2013 rank dataset for PyTorch.

    The list file ``{split}.txt`` under ``pascal_root`` contains one
    ``<image_path> <score>`` pair per line. Sampling keeps the original
    RankIQA strategy: each logical mini-batch draws ``dis_mini`` distortion
    types at random and takes all ``level`` levels of each, so ranking
    losses can compare levels of the same distortion.
    """

    def __init__(self,
                 pascal_root,
                 split,
                 im_shape=(224, 224),
                 transform=None):
        """
        Initialize the TID2013 dataset.

        Args:
            pascal_root: Root directory containing the split list files.
            split: Which list to load ('train', 'val' or 'test').
            im_shape: Crop size as (height, width).
            transform: Optional torchvision transform pipeline; when omitted
                a default ToPILImage -> RandomCrop -> ToTensor chain is used.
        """
        self.pascal_root = pascal_root
        self.im_shape = im_shape

        # Build the default transform when none is supplied.
        if transform is None:
            self.transform = transforms.Compose([
                transforms.ToPILImage(),
                # Crop to the full (height, width) shape; the original passed
                # only im_shape[0], which ignored the width for non-square
                # shapes. Identical behavior for the default (224, 224).
                transforms.RandomCrop(im_shape),
                transforms.ToTensor(),
            ])
        else:
            self.transform = transform

        # Read the image list. Use a context manager so the file handle is
        # closed deterministically (the original open() was never closed).
        list_path = osp.join(self.pascal_root, f"{split}.txt")
        with open(list_path) as f:
            lines = [line.rstrip('\n') for line in f]

        self._roidb = []   # image paths, as written in the list file
        self.scores = []   # one float quality score per image
        for entry in lines:
            parts = entry.split()  # split once per line
            self._roidb.append(parts[0])
            self.scores.append(float(parts[1]))

        # TID2013 dataset parameters
        self.dis = 18      # total number of distortion types generated
        self.batch = 1     # images per distortion level
        self.level = 5     # levels per distortion
        self.dis_mini = 9  # distortion types drawn per mini-batch

        self.num = 0  # rolling cursor into each (distortion, level) slot
        # Number of source images per (distortion, level) slot.
        self.Num = len(self.scores) // self.dis // self.level

        # Pre-generate the full epoch index order.
        self.indices = self._generate_indices()

    def _generate_indices(self):
        """Generate the index list for one epoch, keeping the original
        mini-batch sampling strategy, truncated to the dataset size."""
        all_indices = []
        while len(all_indices) < len(self._roidb):
            batch_indices = self._get_next_minibatch_inds()
            all_indices.extend(int(idx) for idx in batch_indices)
            # Trim any overshoot from the last mini-batch.
            if len(all_indices) > len(self._roidb):
                all_indices = all_indices[:len(self._roidb)]
        return all_indices

    def __len__(self):
        """Return the dataset size (number of listed images)."""
        return len(self._roidb)

    def _get_next_minibatch_inds(self):
        """Return the indices of the next mini-batch.

        Draws ``dis_mini`` distortion types at random and, for each, emits
        ``batch`` consecutive images at every one of the ``level`` levels,
        advancing the internal cursor ``self.num`` between calls.
        """
        db_inds = []
        shuff = np.random.permutation(range(self.dis))

        for k in shuff[:self.dis_mini]:
            for i in range(self.level):
                temp = self.num
                for j in range(self.batch):
                    # len(scores)//dis images per distortion type; within a
                    # type, images are grouped by level, Num per level.
                    db_inds.append(len(self.scores) // self.dis * k + i * self.Num + temp)
                    temp = temp + 1

        self.num = self.num + self.batch
        # Wrap the cursor when fewer than ``batch`` images remain in a slot.
        if self.Num - self.num < self.batch:
            self.num = 0

        return np.asarray(db_inds)

    def __getitem__(self, idx):
        """Return one sample as {'data': tensor, 'label': 1-elem float tensor}."""
        # Map the loader index through the pre-generated sampling order.
        actual_idx = self.indices[idx % len(self.indices)]

        # NOTE(review): paths come straight from the list file — assumed to
        # be absolute or relative to the working directory; confirm.
        img_path = self._roidb[actual_idx]
        score = self.scores[actual_idx]

        img = cv2.imread(img_path)
        if img is None:
            raise ValueError(f"无法读取图像: {img_path}")

        # OpenCV reads BGR; convert to RGB before the transform pipeline.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        img_tensor = self.transform(img)

        return {
            'data': img_tensor,
            'label': torch.tensor([score], dtype=torch.float32)
        }


def create_dataloader(
    pascal_root,
    split,
    batch_size=45,
    im_shape=(224, 224),
    shuffle=True,
    num_workers=10,
    transform=None
):
    """Build a DataLoader over the TID2013 rank dataset.

    Args:
        pascal_root: Data root directory.
        split: Which split list to load ('train' or 'test').
        batch_size: Number of samples per batch.
        im_shape: Image crop size as (height, width).
        shuffle: Whether to reshuffle samples each epoch.
        num_workers: Worker processes used for loading.
        transform: Optional torchvision transform pipeline.

    Returns:
        A PyTorch DataLoader yielding {'data': ..., 'label': ...} batches.
    """
    def collate_fn(samples):
        # Stack the per-sample tensors into batched tensors, keeping the
        # same dict layout the dataset emits.
        return {
            'data': torch.stack([s['data'] for s in samples]),
            'label': torch.stack([s['label'] for s in samples]),
        }

    dataset = TID2013RankDataset(
        pascal_root=pascal_root,
        split=split,
        im_shape=im_shape,
        transform=transform,
    )

    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
    )


# Usage example
if __name__ == "__main__":
    # Build the training data loader.
    loader = create_dataloader(
        pascal_root="/home/qingyu314/workspace/myRankIQA/data/rank_tid2013",
        split="train",
        batch_size=45,
        im_shape=(224, 224)
    )

    # Inspect only the first batch, then stop.
    for sample in loader:
        data = sample['data']
        targets = sample['label']
        print(f"输入形状: {data.shape}, 标签形状: {targets.shape}")
        break