import json

import pandas as pd
from torch.utils.data import Dataset, DataLoader

from NMTDemo.Vectorizer.NMTVectorizer import NMTVectorizer


class NMTDataset(Dataset):
    """PyTorch ``Dataset`` over a parallel-text DataFrame.

    The DataFrame must carry a ``split`` column ('train'/'val'/'test');
    one split is active at a time (see :meth:`set_split`) and rows are
    vectorized lazily in :meth:`__getitem__`.
    """

    def __init__(self, text_df, vectorizer):
        """
        Args:
            text_df: DataFrame with a ``split`` column plus the
                source/target text columns the vectorizer expects.
            vectorizer: an ``NMTVectorizer`` built from the training split.
        """
        self.text_df = text_df
        self._vectorizer = vectorizer

        # Partition the frame once per split and cache (frame, size) pairs.
        splits = {}
        for split_name in ('train', 'val', 'test'):
            frame = text_df[text_df.split == split_name]
            splits[split_name] = (frame, len(frame))

        self.train_df, self.train_size = splits['train']
        self.val_df, self.val_size = splits['val']
        self.test_df, self.test_size = splits['test']

        self._lookup_dict = splits
        self.set_split('train')

    @classmethod
    def load_dataset_and_make_vectorizer(cls, dataset_csv):
        """Load a CSV and fit a fresh vectorizer on its training split."""
        frame = pd.read_csv(dataset_csv)
        training_rows = frame[frame.split == 'train']
        return cls(frame, NMTVectorizer.from_dataframe(training_rows))

    @classmethod
    def load_dataset_and_load_vectorizer(cls, dataset_csv, vectorizer_filepath):
        """Load a CSV together with a previously serialized vectorizer."""
        frame = pd.read_csv(dataset_csv)
        return cls(frame, cls.load_vectorizer_only(vectorizer_filepath))

    @staticmethod
    def load_vectorizer_only(vectorizer_filepath):
        """Deserialize an ``NMTVectorizer`` from a JSON file."""
        with open(vectorizer_filepath) as fp:
            return NMTVectorizer.from_serializable(json.load(fp))

    def save_vectorizer(self, vectorizer_filepath):
        """Serialize the vectorizer to ``vectorizer_filepath`` as JSON."""
        with open(vectorizer_filepath, "w") as fp:
            json.dump(self._vectorizer.to_serializable(), fp)

    def get_vectorizer(self):
        """Return the vectorizer this dataset was built with."""
        return self._vectorizer

    def set_split(self, split="train"):
        """Select which split ('train'/'val'/'test') the dataset serves."""
        self._target_split = split
        self._target_df, self._target_size = self._lookup_dict[split]

    def __len__(self):
        return self._target_size

    def __getitem__(self, index):
        """Vectorize the row at ``index`` of the active split.

        Returns:
            dict with source/target input vectors, the target output
            vector, and the (pre-padding) source length.
        """
        row = self._target_df.iloc[index]
        vectors = self._vectorizer.vectorize(row.source_language,
                                             row.target_language)
        return {"x_source": vectors["source_vector"],
                "x_target": vectors["target_x_vector"],
                "y_target": vectors["target_y_vector"],
                "x_source_length": vectors["source_length"]}

    def get_num_batches(self, batch_size):
        """Number of whole batches the active split yields at ``batch_size``."""
        return len(self) // batch_size

def generate_nmt_batches(dataset, batch_size, shuffle=True,
                         drop_last=True, device="cpu"):
    """Yield mini-batches whose tensors are sorted by source length.

    Wraps ``dataset`` in a ``DataLoader`` and, for every batch, reorders
    each tensor so that ``x_source_length`` runs from longest to
    shortest, then moves the tensors to ``device``.
    """
    loader = DataLoader(dataset=dataset, batch_size=batch_size,
                        shuffle=shuffle, drop_last=drop_last)
    for batch in loader:
        # Indices that sort the batch by source length, descending.
        order = batch['x_source_length'].numpy().argsort()[::-1].tolist()
        yield {name: tensor[order].to(device)
               for name, tensor in batch.items()}