import torch
import os.path as osp
import numpy as np 
from torch.utils.data import Dataset

from joblib import Memory
from sklearn.datasets import load_svmlight_file

# Disk-backed joblib cache so repeated runs skip re-parsing the svmlight files.
mem = Memory('./mycache')

@mem.cache
def get_data(file):
	"""Load an svmlight-format file; return (dense feature matrix, labels)."""
	features, labels = load_svmlight_file(file)
	return features.todense(), labels

class ChemDataset(Dataset):
	"""Dataset of chemical fingerprint vectors in svmlight format.

	Positive examples are loaded eagerly from ``data_po_rank.txt`` via
	:func:`get_data` and split into train/test partitions by a random
	permutation.  In test mode (``train=False``) negative examples from
	``data_ne_rank.txt`` are kept as raw text lines and decoded lazily in
	:meth:`parse` to save memory.
	"""

	def __init__(self, data_dir, train=True, seed=None, ratio=0.3,
              subsample=10000):
		"""
		Args:
			data_dir: directory holding data_po_rank.txt / data_ne_rank.txt.
			train: if True, serve only the training split of positives.
			seed: optional RNG seed controlling the train/test permutation.
			ratio: fraction of positives held out for the test split.
			subsample: cap on the number of negative lines kept (test mode);
				falsy value keeps all of them.
		"""
		pos_data, _ = get_data(osp.join(data_dir, 'data_po_rank.txt'))

		self.ratio = ratio
		self.feat_dim = 1024  # fingerprint length assumed by parse()
		self.train = train

		self.neg_data = None

		# BUG FIX: the original `if not seed:` only ran when seed was falsy,
		# so an explicitly passed seed was silently ignored and self.seed was
		# never assigned for truthy seeds.  Store the seed unconditionally and
		# seed the RNG whenever one was actually provided.
		self.seed = seed
		if seed is not None:
			np.random.seed(seed)

		N_p = len(pos_data)
		N_train = int(N_p * (1 - ratio))

		perm = np.random.permutation(N_p)

		if train:
			self.data = pos_data[perm[:N_train]]
			self.labels = np.ones((N_train,))
		else:
			# Negatives stay as raw svmlight text lines; they are decoded on
			# demand in __getitem__ via parse().
			with open(osp.join(data_dir, 'data_ne_rank.txt'), 'r') as f:
				neg_data_list = f.readlines()
			keep_idx = np.random.permutation(len(neg_data_list))
			if subsample:
				keep_idx = keep_idx[:subsample]
			neg_data_list = [neg_data_list[x] for x in keep_idx]

			self.data = pos_data[perm[N_train:]]
			self.neg_data = neg_data_list
			self.labels = np.concatenate(
				(np.ones(N_p - N_train), np.zeros(len(neg_data_list))))

	def __getitem__(self, index):
		"""Return (feature tensor, label); indices past the positives address neg_data."""
		if self.train or index < len(self.data):
			return torch.from_numpy(self.data[index]).float().squeeze(), self.labels[index]
		else:
			return self.parse(self.neg_data[index - len(self.data)]), self.labels[index]

	def parse(self, feat):
		"""Decode one svmlight text line into a dense binary fingerprint tensor."""
		data = np.zeros(self.feat_dim)
		# Line layout: "<label> <qid> idx:val idx:val ..."; indices are 1-based.
		feats = [int(x.split(':')[0]) - 1 for x in feat.split(' ')[2:]]
		data[np.array(feats)] = 1
		return torch.from_numpy(data).float().squeeze()

	def __len__(self):
		if self.train:
			return self.data.shape[0]
		else:
			return self.data.shape[0] + len(self.neg_data)

	def get_batch(self, batchsize):
		"""Sample a random (features, labels) batch; supported in train mode only."""
		kp = np.random.permutation(len(self))
		if self.train:
			batch = torch.from_numpy(self.data[kp[:batchsize]]).float().squeeze()
			labels = torch.from_numpy(self.labels[kp[:batchsize]]).long()
			return batch, labels
		else:
			raise NotImplementedError("get_batch method in ChemDataset only support train mode")

if __name__ == '__main__':
	# Quick smoke test: build a training dataset and draw one small batch.
	dataset = ChemDataset('../data/', train=True, seed=1213)
	feats, labels = dataset.get_batch(4)
	print(feats.size(), labels.size())