from torch.autograd import Variable
from torch.utils import data
from utils import get_subsequent_mask
from torchtext.functional import pad_sequence, add_token
import io
import os
from typing import Any, Iterator, Optional, Sized
from parameters import SPECIAL_TOKENS
from torchtext.vocab import Vocab
from torch import load
from torch import LongTensor
import jieba
import numpy as np
from functools import partial


class Dataset_text_vocab(data.Dataset):

    def __init__(self, fpath: str, vocab: Vocab) -> None:
        super().__init__()

        self.vocab = vocab
        self.tokenizer = jieba.lcut
        self.dict_data = load(fpath)
        jieba.load_userdict()

    def __getitem__(self, index) -> list[int] | list[str]:

        token_list = self.tokenizer(self.dict_data[str(index)])
        token_list.remove('\n')
        indices = LongTensor(self.vocab.lookup_indices(token_list))
        indices = add_token(
            indices, token_id=SPECIAL_TOKENS['<bos>'], begin=True)

        return add_token(indices, SPECIAL_TOKENS['<eos>'], begin=False)

    def __len__(self) -> int:
        return len(self.dict_data)


class Dataset_text(data.Dataset):
    """Map-style dataset that returns raw jieba token lists (no vocab mapping).

    Each item is the tokenized text stored under key ``str(index)`` in the
    torch-serialized dict at ``fpath``.
    """

    def __init__(self, fpath: str) -> None:
        """
        Args:
            fpath: path to a torch-serialized dict mapping str(index) -> text.
        """
        super().__init__()
        self.tokenizer = jieba.lcut
        self.dict_data = load(fpath)

    def __getitem__(self, index) -> list[str]:
        token_list = self.tokenizer(self.dict_data[str(index)])
        # Drop a newline token if present; unconditional remove() raised
        # ValueError for texts without a trailing '\n'.
        if '\n' in token_list:
            token_list.remove('\n')
        return token_list

    def __len__(self) -> int:
        return len(self.dict_data)


# class Sampler_text(data.Sampler):
#     def __init__(self, data_source: Sized | None) -> None:
#         super().__init__(data_source)
#         self.data_source = data_source

#     def __iter__(self) -> Iterator:
#         for
#         return iter()

#     def __len__(self):
#         return len(self.data_source)


# class Batch:
#     '''
#     Object holding a batch of data with mask during training.
#     '''
#     def __init__(self,src,tgt=None,pad=0) -> None:
#         self.src = src
#         self.src_mask = (src != pad).unsqueeze(-2)
#         if tgt is not None:
#             self.tgt = tgt[:,:,:-1]
#             self.tgt_y = tgt[:,1:]
#             self.tgt_mask = self.make_std_mask(self.tgt,pad)

#     @staticmethod
#     def make_std_mask(tgt,pad):
#         '''
#         Create a mask to hide padding and future words.
#         '''
#         tgt_mask = (tgt != pad).squeeze(-2)
#         tgt_mask = tgt_mask & Variable(
#             get_subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data)
#         )
#         return tgt_mask

# torchtext.data.get_tokenizer('')

if __name__ == '__main__':
    # Path to the serialized dataset next to this file. os.path.join is
    # portable; the previous hard-coded '\\' separator only worked on Windows.
    root = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'dict_dataset.pt')

    # dataset = Dataset_text(fpath=root,)
    # collate_fn = partial(pad_sequence, batch_first=True,
    #                      padding_value=SPECIAL_TOKENS['<pad>'])
    # dataloader = data.DataLoader(
    #     dataset=dataset, batch_size=1, num_workers=1, collate_fn=collate_fn)
    # for x in dataset:
    #     print(x)
    #     break

    # Quick tokenization smoke check on sample sentences.
    text = '我想吃鸳鸯哺乳和敦煌榆钱以及油酥蛋卷'
    text2 = '白居易的犹抱琵琶半遮面'
    text3 = '来点盐酸安非他酮缓释片'
    text4 = '我喜欢湖上春来似画图这一句'

    # lcut returns a list; previously cut/lcut were mixed — joined output
    # is identical, so unify on lcut for consistency.
    print(' '.join(jieba.lcut(text)))
    print(' '.join(jieba.lcut(text2)))
    print(' '.join(jieba.lcut(text3)))
    print(' '.join(jieba.lcut(text4)))
