import numpy as np
import torch
from torchtext.vocab import vocab
from torchtext.data.utils import get_tokenizer
from torch.utils.data import DataLoader,Dataset
from torch.nn.utils.rnn import pack_padded_sequence


from collections import Counter,OrderedDict


# spaCy-backed word tokenizers; requires the 'de_core_news_sm' and
# 'en_core_web_sm' models to be installed (python -m spacy download ...).
de_tokenizer=get_tokenizer('spacy',language='de_core_news_sm')
en_tokenizer=get_tokenizer('spacy',language='en_core_web_sm')


def build_vocab(path, tokenizer):
    """Build a torchtext Vocab from a text corpus.

    Tokens are ordered by descending frequency, then the special tokens
    are inserted at the front so that:
        <pad> -> 0, <bos> -> 1, <eos> -> 2, <unk> -> 3

    Args:
        path: path to a UTF-8 text file, one sentence per line.
        tokenizer: callable mapping a line of text to a list of tokens.

    Returns:
        torchtext.vocab.Vocab with <unk> as the default (OOV) index.
    """
    count = Counter()
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            count.update(tokenizer(line))

    sorted_vo = sorted(count.items(), key=lambda x: x[1], reverse=True)
    specials = ['<pad>', '<bos>', '<eos>', '<unk>']

    v = vocab(OrderedDict(sorted_vo))

    # BUG FIX: the original reset idx=0 inside the loop, so every special
    # landed at index 0 and the final order came out reversed
    # (<unk>=0 ... <pad>=3). enumerate gives the intended ascending slots.
    for idx, special in enumerate(specials):
        v.insert_token(special, idx)

    # Map out-of-vocabulary tokens to <unk> instead of raising.
    v.set_default_index(v['<unk>'])
    return v

# Build the English/German vocabularies from the training corpora.
# `en` and `de` are used at module level by the __main__ demo below.
baseUrl="./datas/"
en=build_vocab(baseUrl+'train.en',en_tokenizer)
de=build_vocab(baseUrl+'train.de',de_tokenizer)



class myDataSet(Dataset):
    """Parallel (en, de) translation dataset.

    Pipeline per sentence pair:
        (en_txt, de_txt) -> (tokens, tokens) -> (list[int], list[int])
    e.g. ("i have a dream", "我有一个梦想")
         -> (["i","have","a","dream"], ["我","有","一个","梦想"])
         -> ([1,3,2,4], [3,1,2,4])

    __getitem__ returns (en_ids, de_ids, en_len, de_len) where each ids
    array is <bos> + tokens + <eos> padded with <pad> to a fixed length,
    and each len is the unpadded length including <bos>/<eos>.
    """

    def __init__(self, vocabs, paths, tokenizers) -> None:
        """
        Args:
            vocabs: [en_vocab, de_vocab]; each supports lookup_indices()
                and __getitem__ for special tokens.
            paths: [en_file, de_file], relative to ./datas/.
            tokenizers: [en_tokenizer, de_tokenizer].
        """
        super().__init__()

        self.baseURL = "./datas/"
        self.vocabs = vocabs
        self.paths = paths
        self.tokenizers = tokenizers

        # Names of the special tokens; indices are resolved via the vocabs.
        self.specials = {'pad': '<pad>', 'eos': '<eos>', 'bos': '<bos>', 'unk': '<unk>'}

        # [max en length, max de length] in tokens (without bos/eos),
        # filled in by read_txt().
        self.max_lengths = []
        self.data = self.read_txt()
        self.lens = len(self.data)

    def read_txt(self):
        """Read both corpora in lockstep and convert lines to index lists.

        Also records the longest sentence length per language in
        self.max_lengths, which pad_item() uses as the padding target.
        """
        data = []
        max_length = [0, 0]
        # BUG FIX: use context managers so the files are closed even if
        # tokenization raises (the original leaked handles on error).
        with open(self.baseURL + self.paths[0], encoding='utf-8') as f_en, \
             open(self.baseURL + self.paths[1], encoding='utf-8') as f_de:
            for en_item, de_item in zip(f_en, f_de):
                en_rows = self.vocabs[0].lookup_indices(self.tokenizers[0](en_item))
                de_rows = self.vocabs[1].lookup_indices(self.tokenizers[1](de_item))

                # Track the longest source/target sentence (in tokens).
                max_length[0] = max(max_length[0], len(en_rows))
                max_length[1] = max(max_length[1], len(de_rows))

                data.append((en_rows, de_rows))

        self.max_lengths = max_length
        return data

    def pad_item(self, item: list, tar=False):
        """Wrap a token-index list with <bos>/<eos> and pad to fixed length.

        Args:
            item: list of token indices (not modified).
            tar: False -> source (en) side, True -> target (de) side;
                selects which vocab and max length to use.

        Returns:
            (np.ndarray of int64 with length max_length + 2,
             true sequence length = len(item) + 2 for bos/eos).
        """
        idx = 1 if tar else 0
        item_len = len(item)
        pad_count = self.max_lengths[idx] - item_len

        BOS_IDX = self.vocabs[idx][self.specials['bos']]
        EOS_IDX = self.vocabs[idx][self.specials['eos']]
        PAD_IDX = self.vocabs[idx][self.specials['pad']]

        # BUG FIX: build a new list instead of mutating `item` in place.
        # The original appended bos/eos/pads to the list stored in
        # self.data, so repeated __getitem__ calls on the same index
        # (e.g. across epochs) kept growing the sample.
        padded = [BOS_IDX] + list(item) + [EOS_IDX] + [PAD_IDX] * pad_count
        return np.array(padded, dtype=np.int64), item_len + 2

    def __len__(self):
        return self.lens

    def __getitem__(self, idx):
        en = self.pad_item(self.data[idx][0])
        de = self.pad_item(self.data[idx][1], tar=True)

        return en[0], de[0], en[1], de[1]



### pack_pad

## <bos> i have a dream <eos> <pad> ->1*6*32 [(1,-1,:)==0]
## <bos> i am a good student <eos>  ->1*7*32

if __name__ == "__main__":
    paths = ['train.en', 'train.de']
    # BUG FIX (readability): the original loop variable `d` shadowed the
    # dataset `d` (and `e`/`d` shadowed nothing but read like the vocabs
    # `en`/`de`); renamed to make the demo unambiguous.
    dataset = myDataSet([en, de], paths, [en_tokenizer, de_tokenizer])
    loader = DataLoader(dataset, batch_size=4)

    for en_batch, de_batch, en_lens, de_lens in loader:
        print(en_batch)
        # pack_padded_sequence (batch_first=False) requires lengths sorted
        # in descending order and input shaped (seq_len, batch), hence the
        # sort + transpose.
        sorted_lens, order = torch.sort(en_lens, descending=True)
        en_sorted = en_batch[order].T
        packed = pack_padded_sequence(en_sorted, sorted_lens)
        print(packed)
        break