from __future__ import print_function
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import torch
import torch.utils.data

class Dataset(object):
    """Load a features/hotness CSV and expose feature matrices and targets.

    Reads the CSV at ``path`` (first column used as the row index), keeps
    only rows with ``hotness >= 1``, and exposes:

    - ``Xa``:  values of columns whose names match regex ``f_a.*``
    - ``Xb``:  values of columns whose names match regex ``f_b.*``
    - ``Xab``: ``Xa`` and ``Xb`` concatenated column-wise
    - ``y``:   ``log(hotness)``, optionally z-scored

    Parameters
    ----------
    path : str
        Path of the CSV file to load.
    norm : bool, default True
        If True, standardize ``Xa``/``Xb`` with ``StandardScaler`` and
        z-score ``y``.
    """

    def __init__(self, path, norm=True):
        self.df = pd.read_csv(path, index_col=0)
        # Drop rows with hotness < 1 so that log(hotness) is >= 0.
        self.df = self.df[self.df['hotness'] >= 1]

        self.Xa = self.df.filter(regex='f_a.*', axis=1).values
        self.Xb = self.df.filter(regex='f_b.*', axis=1).values
        self.y = np.asarray(self.df['hotness'].values, dtype=np.float64)
        self.y = np.log(self.y)

        if norm:
            self.Xa = StandardScaler().fit_transform(self.Xa)
            self.Xb = StandardScaler().fit_transform(self.Xb)
            # BUG FIX: the original expression `y - y.mean() / y.std()`
            # computed y - (mean/std) because of operator precedence;
            # z-scoring requires (y - mean) / std.
            self.y = (self.y - self.y.mean()) / self.y.std()
        self.Xab = np.concatenate((self.Xa, self.Xb), axis=1)


class PytorchDataset(torch.utils.data.Dataset):
    """Wrap one or more aligned numpy arrays as a torch ``Dataset``.

    Every array is cast to float32 (what torch models usually expect) and
    pre-split into per-sample torch tensors.  ``__getitem__`` returns a
    tuple holding one tensor per wrapped array: a 1-D input array yields
    tensors of shape ``(1,)``; an N-D input (N >= 2) yields tensors of the
    row shape with the leading sample axis removed.

    Raises
    ------
    ValueError
        If the arrays do not all have the same length along axis 0.
    """

    def __init__(self, *data_list):
        # All arrays must describe the same set of samples, so their first
        # dimensions have to agree.  Use an explicit check rather than
        # `assert`, which is silently stripped under `python -O`.
        self.numpy_data_list = []
        for data in data_list:
            if data.shape[0] != data_list[0].shape[0]:
                raise ValueError(
                    'all arrays must have the same number of samples '
                    '(axis 0): got %d and %d'
                    % (data.shape[0], data_list[0].shape[0]))
            data = data.astype(np.float32)  # for pytorch
            samples = []
            for sample in np.split(data, data.shape[0]):
                # np.split keeps a leading length-1 axis; drop it for
                # multi-dimensional inputs (1-D inputs keep shape (1,)).
                if sample.ndim >= 2:
                    sample = np.squeeze(sample, axis=0)
                samples.append(torch.from_numpy(sample))
            self.numpy_data_list.append(samples)

    def __len__(self):
        # Number of samples in the first (and, by construction, every) array.
        return len(self.numpy_data_list[0])

    def __getitem__(self, index):
        # One tensor per wrapped array, all for the same sample index.
        return tuple(data[index] for data in self.numpy_data_list)
