import json

import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import T_co

from PytorchNLP5CBOW.CBOWVectorizer import CBOWVectorizer


class CBOWDataset(Dataset):
    """Map-style Dataset over a CBOW dataframe with train/val/test splits.

    Each row of the dataframe is expected to carry a 'context' string,
    a 'target' token, and a 'split' label ('train', 'val' or 'test').
    """

    def __init__(self, cbow_df, vectorizer):
        """
        Args:
            cbow_df: dataframe with 'context', 'target' and 'split' columns
            vectorizer: CBOWVectorizer used to turn rows into tensors
        """
        self.cbow_df = cbow_df
        self._vectorizer = vectorizer

        # Longest context across ALL rows, measured in space-separated tokens;
        # every vectorized context is padded/sized to this length.
        self._max_seq_length = max(
            len(context.split(" ")) for context in cbow_df.context
        )

        # split name -> (subset dataframe, row count); consumed by set_split()
        self._lookup_dict = {}
        for split_name in ('train', 'val', 'test'):
            subset = self.cbow_df[self.cbow_df.split == split_name]
            self._lookup_dict[split_name] = (subset, len(subset))

        # Keep the per-split attributes for callers that read them directly.
        self.train_df, self.train_size = self._lookup_dict['train']
        self.val_df, self.val_size = self._lookup_dict['val']
        self.test_df, self.test_size = self._lookup_dict['test']

        self.set_split('train')

    # Load the dataset and fit a fresh vectorizer on the training split.
    @classmethod
    def load_dataset_and_make_vectorizer(cls, cbow_csv):
        """Read `cbow_csv` and build a CBOWVectorizer from its 'train' rows."""
        cbow_df = pd.read_csv(cbow_csv)
        train_rows = cbow_df[cbow_df.split == 'train']
        return cls(cbow_df, CBOWVectorizer.from_dataframe(train_rows))

    # Load the dataset and reuse a vectorizer that was saved earlier.
    @classmethod
    def load_dataset_and_load_vectorizer(cls, cbow_csv, vectorizer_filepath):
        """Read `cbow_csv` and pair it with a vectorizer deserialized from disk."""
        return cls(pd.read_csv(cbow_csv),
                   cls.load_vectorizer_only(vectorizer_filepath))

    # Deserialize just the vectorizer from a JSON file.
    @staticmethod
    def load_vectorizer_only(vectorizer_filepath):
        """Return a CBOWVectorizer rebuilt from the JSON at `vectorizer_filepath`."""
        with open(vectorizer_filepath) as fp:
            serialized = json.load(fp)
        return CBOWVectorizer.from_serializable(serialized)

    def save_vectorizer(self, vectorizer_filepath):
        """Serialize this dataset's vectorizer to `vectorizer_filepath` as JSON."""
        with open(vectorizer_filepath, "w") as fp:
            json.dump(self._vectorizer.to_serializable(), fp)

    def get_vectorizer(self):
        """Return the vectorizer paired with this dataset."""
        return self._vectorizer

    # Choose which split subsequent __len__/__getitem__ calls operate on.
    def set_split(self, split="train"):
        """Point the dataset at one of 'train', 'val' or 'test'."""
        self._target_split = split
        self._target_df, self._target_size = self._lookup_dict[split]

    def __len__(self):
        """Number of rows in the currently selected split."""
        return self._target_size

    def __getitem__(self, index):
        """Return one training example from the current split.

        Returns:
            dict with 'x_data' (vectorized context, padded to the max
            sequence length) and 'y_target' (vocabulary index of the target).
        """
        row = self._target_df.iloc[index]
        return {
            'x_data': self._vectorizer.vectorize(row.context, self._max_seq_length),
            'y_target': self._vectorizer.cbow_vocab.lookup_token(row.target),
        }

    def get_num_batches(self, batch_size):
        """Number of FULL batches the current split yields at `batch_size`."""
        return len(self) // batch_size

def generate_batches(dataset, batch_size, shuffle=True, drop_last=True, device="cpu"):
    """Wrap `dataset` in a DataLoader and yield batches moved to `device`.

    Args:
        dataset: map-style dataset whose items are dicts of tensors
        batch_size: number of samples per batch
        shuffle: reshuffle the data every epoch
        drop_last: drop the final incomplete batch
        device: device string/object every tensor in a batch is moved to

    Yields:
        dict mapping each field name to its batched tensor on `device`.
    """
    dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
                            shuffle=shuffle, drop_last=drop_last)
    for data_dict in dataloader:
        # Use the already-bound `tensor` instead of re-indexing data_dict[name]
        # (the original did a redundant second dict lookup per field).
        yield {name: tensor.to(device) for name, tensor in data_dict.items()}