from torch.utils.data import Dataset, DataLoader
import pandas as pd
import yaml
import numpy as np


class CustomScaler:
    """Column-wise min-max scaler mapping data into ``feature_range``.

    Bounds may be supplied up front via ``arg['min_value']`` /
    ``arg['max_value']`` (lists, one entry per column) or learned from data
    with :meth:`fit`. ``arg`` is kept so the fitted bounds can be written
    back for persistence.
    """

    def __init__(self, arg):
        """
        Args:
            arg: config dict with keys ``min_value``, ``max_value``
                (per-column bounds or ``None``) and ``feature_range``
                (two-element ``[low, high]``; falsy values default to ``[0, 1]``).
        """
        self.min_values = np.array(arg['min_value']) if arg['min_value'] is not None else None
        self.max_values = np.array(arg['max_value']) if arg['max_value'] is not None else None
        self.feature_range = arg['feature_range'] if arg['feature_range'] else [0, 1]
        self.arg = arg

    def fit(self, data):
        """Learn per-column min/max from ``data`` (rows = samples)."""
        self.min_values = np.min(data, axis=0)
        self.max_values = np.max(data, axis=0)

    def transform(self, data):
        """Scale ``data`` into ``feature_range`` using the fitted bounds.

        NOTE(review): a constant column (max == min) divides by zero and
        yields inf/nan — callers should ensure columns vary.
        """
        assert self.min_values is not None and self.max_values is not None, '使用前请先使用fit'
        scaled_data = (data - self.min_values) / (self.max_values - self.min_values)
        return self.feature_range[0] + (self.feature_range[1] - self.feature_range[0]) * scaled_data

    def fit_transform(self, data):
        """Convenience: :meth:`fit` then :meth:`transform` on the same data."""
        self.fit(data)
        return self.transform(data)

    def inverse_transform(self, scaled_data):
        """Map scaled values back to the original data range."""
        assert self.min_values is not None and self.max_values is not None, '使用前请先使用fit'
        scaled_data = (scaled_data - self.feature_range[0]) / (self.feature_range[1] - self.feature_range[0])
        return scaled_data * (self.max_values - self.min_values) + self.min_values

    def update_params(self):
        """Write the fitted bounds back into ``arg`` and return it (for saving)."""
        self.arg['min_value'] = self.min_values.tolist()
        self.arg['max_value'] = self.max_values.tolist()
        return self.arg

    # Backward-compatible alias: existing callers use the misspelled name.
    updata_params = update_params

class Mydataset(Dataset):
    """Sliding-window time-series dataset backed by an Excel file.

    Each sample is ``(window, label)``: ``window`` is ``seg_len`` consecutive
    rows of the selected feature columns (float32), and ``label`` is the
    ``pred_col`` values of the row immediately after the window.
    """

    def __init__(self, arg, mode='train'):
        """
        Args:
            arg: config dict with keys ``filepath``, ``price_col``,
                ``pred_col``, ``arg_col``, ``time_col``, ``seg_len``,
                ``split``, ``train_split``, ``min_value``, ``max_value``.
            mode: ``'train'`` or ``'val'`` — which split this instance serves
                (only relevant when ``arg['split']`` is truthy).
        """
        self.price_col = arg['price_col']
        self.pred_col = arg['pred_col']
        self.arg_col = arg['arg_col']
        # Feature columns fed to the model: prediction targets first, then extras.
        self.usecols = self.pred_col + self.arg_col
        self.time_col = arg['time_col']
        self.seg_len = arg['seg_len']

        # Load raw data; keep the untouched frame for inspection.
        row_datas = pd.read_excel(arg['filepath'])  # type: pd.DataFrame
        self.row_datas = row_datas
        # BUG FIX: the original code sorted into a temporary that was
        # immediately overwritten, so sorting by time never took effect.
        if self.time_col:
            row_datas = row_datas.sort_values(self.time_col)
        # .copy() prevents pandas SettingWithCopy issues on the assignment below.
        datas = row_datas[self.usecols].copy()

        # Min-max normalisation; fit only when no bounds were supplied.
        self.scaler = CustomScaler(arg)
        if arg['min_value'] is None or arg['max_value'] is None:
            self.scaler.fit(datas.values)
            arg = self.scaler.updata_params()  # write fitted bounds back into arg for saving
        datas[self.usecols] = self.scaler.transform(datas.values)
        self.datas_len = len(datas)

        if arg['split']:
            # Train/validation split by row count.
            train_split = arg['train_split']
            assert 0 < train_split < 1, f'请确保输入split为(0，1）之间，你的输入为{train_split}'

            train_size = round(self.datas_len * train_split)
            val_size = self.datas_len - train_size
            if mode == 'train':
                # NOTE(review): the train frame keeps seg_len extra rows so the
                # last window has a label; those rows overlap the val region.
                self.datas = datas.iloc[:train_size + self.seg_len]
                self.len = train_size
            elif mode == 'val':
                self.datas = datas.iloc[train_size:]
                self.len = val_size - self.seg_len
            else:
                # Original code silently left self.datas undefined here.
                raise ValueError(f"mode must be 'train' or 'val', got {mode!r}")
        else:
            self.datas = datas
            self.len = len(datas) - self.seg_len
        self.arg = arg

    def __getitem__(self, item):
        """Return (window, label) for the window starting at row ``item``."""
        data = self.datas.iloc[item:self.seg_len + item].values.astype(np.float32)
        # Label is the prediction-column row right after the window.
        label = self.datas[self.pred_col].iloc[self.seg_len + item].values.astype(np.float32)
        return data, label

    def __len__(self):
        """Number of valid windows in this split."""
        return self.len

    def get_price_col_index(self):
        """Return the column indices of the price columns within ``datas``."""
        return [self.datas.columns.get_loc(column) for column in self.price_col]

    def return_arg(self):
        """Return arg updated with the fitted normalisation parameters."""
        return self.arg

    def return_scaler(self):
        """Return the (possibly fitted) scaler."""
        return self.scaler


def get_dataset(arg):
    """Build and return the (train, val) dataset pair from one config dict."""
    return Mydataset(arg, mode='train'), Mydataset(arg, mode='val')


def get_dataloader(trainset, valset, arg):
    """Wrap the datasets in DataLoaders; validation is never shuffled."""
    train_loader = DataLoader(
        dataset=trainset,
        batch_size=arg['batchsize'],
        shuffle=arg['shuffle'],
    )
    val_loader = DataLoader(
        dataset=valset,
        batch_size=arg['val_batchsize'],
        shuffle=False,
    )
    return train_loader, val_loader


if __name__ == '__main__':
    # Smoke test: load the default config and iterate the train split once.
    config_path = './config/default.yaml'
    with open(config_path, 'r', encoding='utf-8') as fh:
        cfg = yaml.load(fh, Loader=yaml.FullLoader)
    train_ds = Mydataset(cfg, mode='train')
    loader = DataLoader(train_ds, batch_size=16, shuffle=False)
    for data, label in loader:
        print(1)
        pass
