# NOTE(review): this entire file is commented-out dead code (a Dataset subclass
# and two preprocessing helpers for a seq2seq/Transformer pipeline). If it is no
# longer used, prefer deleting the file and relying on version-control history;
# if it is intentionally kept for reference, say so here and name the module
# that superseded it.
# import torch
# import torch.utils.data as Data
#
# from constant.constant import src_vocab, tgt_vocab
#
#
# class MyDataSet(Data.Dataset):
#     def __init__(self, enc_inputs, dec_inputs, dec_outputs):
#         super(MyDataSet, self).__init__()
#         self.enc_inputs = enc_inputs
#         self.dec_inputs = dec_inputs
#         self.dec_outputs = dec_outputs
#
#     def __len__(self):
#         return self.enc_inputs.shape[0]
#
#     def __getitem__(self, idx):
#         return self.enc_inputs[idx], self.dec_inputs[idx], self.dec_outputs[idx]
#
#
# def preprocess_data(sentences):
#     enc_inputs, dec_inputs, dec_outputs = [], [], []
#     for i in range(len(sentences)):
#         enc_input = [[src_vocab[n] for n in sentences[i][0].split()]]  # [[1, 2, 3, 4, 0], [1, 2, 3, 5, 0]]
#         dec_input = [
#             [tgt_vocab[n] for n in sentences[i][1].split()]]  # [[6, 1, 2, 3, 4, 8], [6, 1, 2, 3, 5, 8]]
#         dec_output = [
#             [tgt_vocab[n] for n in sentences[i][2].split()]]  # [[1, 2, 3, 4, 8, 7], [1, 2, 3, 5, 8, 7]]
#
#         enc_inputs.extend(enc_input)
#         dec_inputs.extend(dec_input)
#         dec_outputs.extend(dec_output)
#
#     return torch.LongTensor(enc_inputs), torch.LongTensor(dec_inputs), torch.LongTensor(dec_outputs)
#
#
# def preprocess_user_input(sentences):
#     enc_inputs = [[src_vocab[n] for n in sentences.split()]]
#     return torch.LongTensor(enc_inputs)
