# coding: utf-8
"""
@Time    : 2024/8/14 10:15
@Author  : Y.H LEE
"""
import torch
from torch.utils.data import Dataset, DataLoader
from sys_params import *
import pandas as pd

from utils.tools import StandardScaler


class SimpleDataset(Dataset):
    """
        Single to single prediction task only.

        Sliding-window dataset over the LAST column of a CSV file: each item is
        (input_data, label) where input_data holds the first ``seq_window - 1``
        values of a window and label is the window's final value.
    """

    def __init__(self, data_path, seq_window, dataset_type='train', split_rate=0.15, scale=False):
        """
        :param data_path: path to a CSV file; only its last column is used as the series.
        :param seq_window: full window length (inputs + 1 label); must be >= 2.
        :param dataset_type: 'train' | 'valid' | anything else selects the test split.
        :param split_rate: fraction of rows given to EACH of the valid and test splits.
        :param scale: if True, standardize values using statistics fitted on the
                      train split only (avoids leakage into valid/test).
        :raises ValueError: if seq_window < 2 (a window needs at least one input and one label).
        """
        if seq_window < 2:
            raise ValueError('seq_window must be >= 2 (at least one input step plus the label)')
        self.cursor = 0  # NOTE(review): unused within this class; kept in case external code reads it
        self.seq_window = seq_window
        self.scaler = StandardScaler()
        # read data
        raw_data = pd.read_csv(data_path, encoding='utf-8')
        # get target data
        self.tgt_data = self.__get_target_data__(raw_data, dataset_type, split_rate, scale)

    def __get_target_data__(self, raw_data, dataset_type, split_rate, scale):
        """Select the split's rows, keep the last column, optionally standardize.

        Row layout: first (1 - 2*split_rate) of rows -> train, next split_rate -> valid,
        final split_rate -> test.  Returns a 1-D numpy array of values.
        """
        if dataset_type == 'train':
            data = raw_data.iloc[0: int(raw_data.shape[0] * (1 - 2 * split_rate)), :]
        elif dataset_type == 'valid':
            data = raw_data.iloc[
                   int(raw_data.shape[0] * (1 - 2 * split_rate)): int(raw_data.shape[0] * (1 - split_rate)), :]
        else:
            data = raw_data.iloc[int(raw_data.shape[0] * (1 - split_rate)):, :]

        # target series is the last column of the CSV
        feat_data = data.iloc[:, -1]

        if scale:
            # fit on the train portion only, regardless of which split this instance serves
            self.scaler.fit(raw_data.iloc[0: int(raw_data.shape[0] * (1 - 2 * split_rate)), :].iloc[:, -1].values)
            feat_data = self.scaler.transform(feat_data.values)
        else:
            feat_data = feat_data.values

        return feat_data

    def __getitem__(self, index):
        """Return (input_data, label) for the window starting at ``index``.

        input_data: the first seq_window - 1 values; label: the final value.
        """
        seq_data = self.tgt_data[index: index + self.seq_window]
        input_data, label = seq_data[: -1], seq_data[-1]
        input_data = torch.tensor(input_data, dtype=torch.float, device=device)
        label = torch.tensor(label, dtype=torch.float, device=device)

        return input_data, label

    def __len__(self):
        # Number of COMPLETE windows, not raw rows: the original returned
        # tgt_data.shape[0], which let __getitem__ produce truncated windows
        # (and a wrong label) for the last seq_window - 1 indices.
        return max(0, len(self.tgt_data) - self.seq_window + 1)


# class SimpleDataset(Dataset):
#     def __init__(self, data_path, dataset_type='train', pred_type='MS', split_rate=0.15, scale=True):
#         self.scaler = StandardScaler()
#         # read data
#         raw_data = pd.read_csv(data_path, encoding='utf-8')
#         # get target data
#         self.tgt_data = self.__get_target_data__(raw_data, dataset_type, pred_type, split_rate, scale)
#
#         print('pause')
#
#     def __get_target_data__(self, raw_data, dataset_type, pred_type, split_rate, scale):
#         if dataset_type == 'train':
#             data = raw_data.iloc[:, 0: int(raw_data.shape[0] * (1 - 2 * split_rate))]
#         elif dataset_type == 'valid':
#             data = raw_data.iloc[:,
#                    int(raw_data.shape[0] * (1 - 2 * split_rate)): int(raw_data.shape[0] * (1 - split_rate))]
#         else:
#             data = raw_data.iloc[:, int(raw_data.shape[0] * (1 - split_rate)):]
#
#         raw_date = None
#         if pred_type == 'M' or pred_type == 'MS':
#             # split date and data columns
#             raw_date, feat_data = data.iloc[:, 0], data.iloc[:, 1:]
#         else:
#             feat_data = data.iloc[:, -1]
#
#         if scale:
#             self.scaler.fit(raw_data.iloc[:, 0: int(raw_data.shape[0] * (1 - 2 * split_rate))].values)
#             data = self.scaler.transform(feat_data.values)
#         else:
#             data = feat_data.values


if __name__ == '__main__':
    # Smoke-test for the Dataset module: build a dataset from the ETTh1 CSV,
    # wrap it in a DataLoader, and print every batch of one pass.
    csv_path = r'../datasets/ETT/ETTh1.csv'
    ett_dataset = SimpleDataset(csv_path, seq_window=32)
    loader = DataLoader(dataset=ett_dataset, batch_size=32)
    for batch_idx, batch in enumerate(loader):
        print(batch)
    print('pause')
