import random
from abc import abstractmethod
from typing import List, Optional

from numpy import array_split
from torch.utils.data import BatchSampler, DataLoader, Dataset, Sampler


def dot(weights: List[float], arr: List[float]) -> float:
    """Return the dot product of two equal-length sequences.

    :param weights: weight for each element of ``arr``.
    :param arr: values to be weighted and summed.
    :return: ``sum(weights[i] * arr[i])``; 0 for empty inputs.
    """
    assert len(weights) == len(arr)
    # Generator expression: no intermediate list is materialized.
    return sum(w * x for w, x in zip(weights, arr))


class BaseTxtDataset(Dataset):
    """
    Dataset over several parallel text corpora.

    :param texts: the data; ``texts[n]`` is the n-th corpus,
        ``texts[n][m]`` the m-th sentence of corpus n, and
        ``texts[n][m][p]`` the p-th token of that sentence. Every corpus
        must contain the same number of sentences — item ``i`` of the
        dataset is the tuple of the i-th sentence from each corpus.
    :param sort_weights: per-corpus length weights used by the bucketed
        sampler to sort samples inside a bucket; ``None`` disables
        weighted sorting.

    Example::

        texts = [
            [["我", "和", "你"], ["你", "和", "我"]],
            [["i", "with", "you"], ["you", "with", "me"]],
            [["du", "und", "ich"], ["ich", "und", "du"]],
        ]
    """
    # data[i] is a tuple holding the i-th sentence of every corpus.
    data: List
    # May legitimately be None, hence Optional.
    sort_weights: Optional[List[float]]

    def __init__(self, texts, sort_weights: Optional[List[float]] = None):
        # The corpora must be parallel: identical sentence counts.
        assert len(set(len(each) for each in texts)) == 1
        self.data = list(zip(*texts))
        self.sort_weights = sort_weights

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)

    @abstractmethod
    def collate_fn(self, batch):
        """Turn a list of samples into a batch; subclasses must override."""
        raise NotImplementedError

    def gen_data_loader(self, batch_size, shuffle=True):
        """Build a :class:`MyDataLoader` over this dataset."""
        return MyDataLoader(
            dataset=self,
            batch_size=batch_size,
            collate_fn=self.collate_fn,
            drop_last=False,
            shuffle=shuffle
        )


class MySequentialSampler(Sampler):
    """Index sampler that yields length-sorted buckets of indices.

    With ``shuffle=True`` the indices are shuffled, split into buckets of
    roughly ``bucket_size``, the buckets themselves are shuffled, and the
    indices inside each bucket are sorted by a weighted sum of the
    sample's field lengths — neighbouring samples then have similar
    lengths (less padding per batch) while the global order stays random.
    With ``shuffle=False`` indices are yielded sequentially.

    :param dataset: an indexable dataset whose items are tuples of
        token sequences (see ``BaseTxtDataset``).
    :param sorted_weights: one weight per field of a sample; ``None``
        means all fields count equally.
    :param bucket_size: number of indices per bucket.
    :param shuffle: enable the shuffle-then-bucket-sort behaviour.
    """

    def __init__(self,
                 dataset: "BaseTxtDataset",
                 sorted_weights,
                 bucket_size,
                 shuffle):
        super(MySequentialSampler, self).__init__(None)
        self.dataset = dataset
        self.indices = list(range(len(self.dataset)))
        self.bucket_size = bucket_size
        self.sorted_weights = sorted_weights
        self.shuffle = shuffle
        # Kept as an attribute for backward compatibility with the
        # previous closure-based implementation.
        self.sort_func = self._sort_key

    def _sort_key(self, idx):
        """Weighted total length of sample ``idx`` (in-bucket sort key)."""
        lengths = [len(field) for field in self.dataset[idx]]
        if self.sorted_weights is None:
            # No weights supplied (dataset default): every field counts
            # equally instead of crashing on dot(None, ...).
            return sum(lengths)
        return sum(w * l for w, l in zip(self.sorted_weights, lengths))

    def __iter__(self):
        if not self.shuffle:
            return iter(self.indices)
        # Random permutation of all indices.
        shuffled = random.sample(self.indices, len(self.indices))
        # Split into m buckets, m = len // bucket_size + 1.
        buckets = array_split(shuffled, (len(self.indices) // self.bucket_size) + 1)
        # Randomize bucket order, then sort within each bucket by length.
        random.shuffle(buckets)
        ordered = []
        for bucket in buckets:
            ordered += sorted(bucket, key=self._sort_key)
        return iter(ordered)

    def __len__(self) -> int:
        return len(self.indices)


class MyDataLoader(DataLoader):
    """DataLoader that batches indices through the bucketed length sampler."""

    def __init__(self,
                 dataset: BaseTxtDataset,
                 batch_size,
                 collate_fn,
                 drop_last,
                 shuffle=True):
        # A bucket spans five batches' worth of indices, so each batch is
        # drawn from samples of similar length.
        index_sampler = MySequentialSampler(
            dataset=dataset,
            sorted_weights=dataset.sort_weights,
            shuffle=shuffle,
            bucket_size=batch_size * 5)
        batches = BatchSampler(
            sampler=index_sampler,
            batch_size=batch_size,
            drop_last=drop_last)
        super().__init__(
            dataset=dataset,
            batch_sampler=batches,
            collate_fn=collate_fn)

# if __name__ == '__main__':
#     texts = [
#         [['%d' % i] * (i + 1) for i in range(200)],
#         [["i", "with", "you"], ["you", "with", "me"]] * 100,
#         [["du", "und", "ich"], ["ich", "und", "du"]] * 100,
#     ]
#     from torch.utils.data import Dataset
#
#
#     class TDataset(BaseTxtDataset):
#
#         def collate_fn(self, batch):
#             return batch
#
#
#     ds = TDataset(
#         texts=texts,
#         sort_weights=[1, 0, 0]
#     )
#
#     dl = ds.gen_data_loader(batch_size=7, shuffle=False)
#     for each_batch in dl:
#         for item in each_batch:
#             print(list(set(item[0])))
