from __future__ import print_function
from torch.utils.data import Dataset
from torch.autograd import Variable
import torch
import numpy as np
import pickle


def MdataLoader(path1, path2):
    """Unpickle and return the two data files backing ``Mdataset``.

    Args:
        path1: path to the main data pickle (expected to hold a dict with
               "data", "age", "cnt2word" — see ``Mdataset``).
        path2: path to the auxiliary pickle (expected to hold a dict with
               "product_vocab_dict").

    Returns:
        (data1, data2): the two unpickled objects, in argument order.

    Note: original version leaked the second file handle (``f2`` was never
    closed); ``with`` guarantees both are closed even if ``pickle.load``
    raises.
    """
    with open(path1, 'rb') as f1, open(path2, 'rb') as f2:
        data1 = pickle.load(f1)
        data2 = pickle.load(f2)
    return data1, data2


class Mdataset(Dataset):
    """Dataset yielding (token-id sequence, age label, per-token features).

    Backed by two pickles loaded via ``MdataLoader``:
      * ``path1`` -> dict with
          - "data":     array-like of shape [samples, max_length], token ids
          - "age":      one label entry per sample (array-like per sample —
                        TODO confirm: callers squeeze it, see ``__main__``)
          - "cnt2word": mapping token id -> product/creative key
      * ``path2`` -> dict with
          - "product_vocab_dict": mapping product key -> feature list
            (presumably 4 values wide, matching the fallback — verify)
    """

    # Fallback feature row for tokens absent from the product vocabulary.
    _MISSING_FEATURE = [0, 0, 0, 0]

    def __init__(self, path1, path2):
        """Load both pickles and unpack the fields this dataset uses.

        Raises:
            ValueError: if the data and label arrays differ in length
                (explicit raise instead of ``assert``, which is stripped
                under ``python -O``).
        """
        self.data1, self.data2 = MdataLoader(path1, path2)

        self.src_ = self.data1["data"]
        self.target_ = self.data1["age"]
        self.cnt2cid = self.data1["cnt2word"]
        self.uid2feature = self.data2["product_vocab_dict"]

        if len(self.src_) != len(self.target_):
            raise ValueError(
                "data/label length mismatch: %d samples vs %d labels"
                % (len(self.src_), len(self.target_)))

    def __len__(self):
        # Number of samples in the source array.
        return len(self.src_)

    def __getitem__(self, indx):
        """Return the sample at ``indx`` as three ``LongTensor``s.

        Returns:
            (src, target, features) where ``features`` has one row per
            token in the sequence; tokens with no vocabulary entry get
            the zero fallback row.
        """
        # One feature row per token id in the sequence.
        feature_array = np.array([
            self.uid2feature.get(self.cnt2cid[uid], self._MISSING_FEATURE)
            for uid in self.src_[indx]
        ])
        # ``torch.autograd.Variable`` has been a no-op alias for Tensor
        # since torch 0.4, so plain tensors are returned.  The sample is
        # built locally instead of being stashed on ``self`` — per-item
        # instance state was unnecessary.
        return (torch.LongTensor(self.src_[indx, ...]),
                torch.LongTensor(self.target_[indx]),
                torch.LongTensor(feature_array))


if __name__ == '__main__':
    # Smoke test: build the dataset from the training pickles on the
    # data NFS mount and print a single batch for manual inspection.
    print("### local test in dataset start ###")
    from torch.utils.data import DataLoader
    batch_size = 1
    h5_train_pos = "/home/datanfs/macong_data/tencent_data/train_preliminary/train_data/h5train.pkl"
    aid_dict_pos = "/home/datanfs/macong_data/tencent_data/" \
                   "train_preliminary/train_data/h5aid.pkl"
    train_ = Mdataset(h5_train_pos, aid_dict_pos)
    train_loader = DataLoader(train_, batch_size=batch_size, shuffle=False, drop_last=True)
    # ``step`` replaces the original loop variable ``iter``, which
    # shadowed the builtin of the same name (and was never used).
    for step, traindata in enumerate(train_loader):
        train_inputs, train_labels, feature_inputs = traindata
        # Drop the size-1 batch dimension for readable printing.
        train_labels = torch.squeeze(train_labels)
        feature_inputs = torch.squeeze(feature_inputs)
        print(train_inputs)
        print(train_labels)
        print(feature_inputs)
        break  # inspect only the first batch
    print("### local test in dataset over ###")

