import os
import torch
from torch.utils.data import DataLoader, Dataset, Sampler
from PIL import Image
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from pathlib import Path
import pandas as pd
import numpy as np
from collections import defaultdict

from utils import detect_content_box

from IPython import embed

class ZhCharDataset_96(Dataset):
    """Chinese-character font-image dataset, resized to 96x96.

    Expected directory layout: ``root/<mod>/<font_name>/<image>``.  On first
    use the file list is cached to ``root/<mod>.csv`` and the label
    vocabulary to ``root/fonts.csv`` so later runs skip the directory walk.
    """

    def __init__(self, root='./dataset', mod='train', font_folder='./data/fonts',
                 transform=False, mean=0.8401, std=0.3287) -> None:
        """
        root: the folder containing the 'train', 'val', 'test' sub-folders
        mod: which split to load ('train' / 'val' / 'test')
        font_folder: kept for interface compatibility (not used here)
        transform: when True, __getitem__ returns normalized tensors
        mean/std: normalization statistics, computed with utils cal_mean_std():
                  nankai_without_handwrite  mean:0.8209  std:0.3395
                  nankai_raw                mean:0.8401  std:0.3287 (default)
                  nankai_black              mean:0.1599  std:0.3295
        """
        super().__init__()
        self.root = root
        self.mod = mod
        self.csv = os.path.join(root, mod) + '.csv'
        self.font_csv = os.path.join(root, 'fonts.csv')
        self.transform = transform
        # Keep the stats on the instance so deNorm() cannot drift out of
        # sync with the Normalize transform below.
        self.mean = mean
        self.std = std
        self.transforms = transforms.Compose([
            # transforms.Lambda(lambda img: img.crop(detect_content_box(img))),
            # A global-pooling head replaces the fully-connected layer, so
            # resizing is not strictly required -- but without a fixed size
            # the samples cannot be collated into batches.
            transforms.Resize((96, 96)),
            # ToTensor rescales pixel values from [0, 255] down to [0, 1].
            transforms.ToTensor(),
            # transforms.Lambda(lambda x: self.bi_mask(x)),
            transforms.Normalize(mean=self.mean, std=self.std)
        ])

        # images: [str img_path], labels: [str font name],
        # i2label: {int: font name}, label2i: {font name: int}
        self.images, self.labels, self.i2label, self.label2i = self.load_csv()

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        """Return (img, label_index, index); img/label are tensors when
        ``self.transform`` is True, otherwise a PIL image and a plain int."""
        img = Image.open(self.images[index])
        ilabel = self.label2i[self.labels[index]]
        if self.transform:
            img = self.transforms(img)
            ilabel = torch.tensor(ilabel)
        return img, ilabel, index

    def deNorm(self, img_tensor):
        """Invert the Normalize transform so the tensor can be displayed."""
        return img_tensor * self.std + self.mean

    def bi_mask(self, img_tensor):
        """Push near-zero pixels strongly negative (soft binarization mask)."""
        mask = (img_tensor < 0.01).float() * (-10.0)
        return img_tensor + mask

    def load_csv(self):
        """Build (or load the cached) image/label lists and label vocabulary.

        Returns:
            images: list of image path strings
            labels: list of font-name strings, one per image
            i2label: {int index: font name}
            label2i: {font name: int index}
        """
        images = []
        labels = []
        label_sets = None
        if not Path(self.csv).exists():
            for path in Path(self.root).joinpath(self.mod).rglob('*'):
                # .lower() so uppercase extensions (.JPG, .PNG) are not skipped
                if path.is_file() and path.suffix.lower() in ['.jpg', '.png', '.jpeg']:
                    images.append(path.resolve())
                    labels.append(path.parent.stem)
            df = pd.DataFrame({'images_path': images, 'label': labels})
            df.to_csv(self.csv, index=False)
        else:
            df = pd.read_csv(self.csv)
            images = df['images_path'].tolist()
            labels = df['label'].tolist()

        if not Path(self.font_csv).exists():
            label_sets = pd.Series(sorted(labels)).drop_duplicates().reset_index(drop=True)
            label_sets.to_csv(self.font_csv, header=['font'], index_label='index')
        else:
            label_sets = pd.read_csv(self.font_csv, index_col='index')['font']
        i2label = label_sets.to_dict()
        label2i = {value: key for key, value in i2label.items()}

        return images, labels, i2label, label2i
        # images: list of str (path of img)  labels: list of str (font name of class)

class BalancedSampler(Sampler):
    def __init__(self, dataset, batch_size, shuffle=True):
        """
        初始化分层采样器
        :param labels: 数据集的标签列表（List 或 NumPy 数组）
        :param batch_size: 每个批次的样本数量
        :param shuffle: 是否在每个 epoch 中打乱样本顺序
        """
        super(BalancedSampler, self).__init__()
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        # self.labels = [label for _, label in dataset]

        # 按类别分组
        self.label_to_indices = defaultdict(list)
        for _, label, img_idx in self.dataset:
            self.label_to_indices[label.item()].append(img_idx)
        self.num_classes = len(self.label_to_indices)
        self.len =  len(self.dataset) // self.batch_size
        print(f"初始化采样器，类别数为：{self.num_classes}")

        # 初始化每个类别的当前索引
        self.current_indices = {label: 0 for label in self.label_to_indices}

    def __iter__(self):
        """
        生成分层的批次索引
        """
        # 打乱每个类别的样本顺序
        if self.shuffle:
            for label in self.label_to_indices:
                np.random.shuffle(self.label_to_indices[label])

        # 初始化批次索引列表
        batch_indices = []

        for step in range(self.len):
            # 计算每个类别的样本数量
            self.num_samples_per_class = self.batch_size // len(self.label_to_indices)
            self.remaining_samples = self.batch_size % len(self.label_to_indices)
            # 遍历每个类别，按比例采样
            for label in self.label_to_indices:
                # indices:list[], 其中元素是具体某个label所含的样本的img_idx
                indices = self.label_to_indices[label]
                num_samples = self.num_samples_per_class + (1 if self.remaining_samples > 0 else 0)

                # 获取当前类别的剩余样本
                start_idx = self.current_indices[label]
                end_idx = start_idx + num_samples

                # 如果剩余样本不足，则从头开始循环
                if end_idx >= len(indices):
                    end_idx = len(indices)
                    remaining = num_samples - (end_idx - start_idx)
                    batch_indices.extend(indices[start_idx:end_idx])
                    batch_indices.extend(indices[:remaining])
                    self.current_indices[label] = remaining
                else:
                    batch_indices.extend(indices[start_idx:end_idx])
                    self.current_indices[label] = end_idx

                self.remaining_samples -= 1

            # 打乱批次内的样本顺序
            if self.shuffle:
                np.random.shuffle(batch_indices)

            yield batch_indices
            batch_indices = []

    def __len__(self):
        """
        返回总的batch数量
        """
        return self.len

if __name__ == '__main__':
    # Visual smoke test: load one training sample, undo the normalization,
    # and display it as a grayscale image.
    dataset = ZhCharDataset_96('./dataset/nankai_raw/', mod='train', transform=True)
    sample, target, sample_idx = dataset[5232]   # sample: (1, 96, 96), target: 0-dim tensor
    restored = dataset.deNorm(sample)
    pil_img = transforms.ToPILImage()(restored)
    plt.imshow(pil_img, cmap='gray')
    plt.show()
