import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
import torch
import torch.nn as nn
from torch.utils.data import DataLoader,Dataset


def load_forecast_npy(name, univar=False):
    """Load a forecasting dataset stored as ``datasets/{name}.npy``.

    Args:
        name: dataset name (file stem under ``datasets/``).
        univar: if True, keep only the last feature column (univariate
            forecasting), mirroring the univariate handling in
            ``load_forecast_csv``.

    Returns:
        Tuple of ``(data, train_slice, valid_slice, test_slice, scaler,
        pred_lens, n_covariate_cols)`` where ``data`` has shape
        ``(1, T, n_features)`` and ``n_covariate_cols`` is always 0 here.
    """
    data = np.load(f'datasets/{name}.npy')
    if univar:
        # BUG FIX: the original `data[: -1:]` (missing comma) dropped the
        # last TIME STEP; univariate mode should select the last feature
        # column, consistent with load_forecast_csv.
        data = data[:, -1:]

    # Chronological 6:2:2 split into train / valid / test.
    train_slice = slice(None, int(0.6 * len(data)))
    valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))
    test_slice = slice(int(0.8 * len(data)), None)

    # Standardize with statistics computed on the training split only,
    # then apply the same transform to the whole series.
    scaler = StandardScaler().fit(data[train_slice])
    data = scaler.transform(data)
    # Add a leading instance axis: (T, F) -> (1, T, F).
    data = np.expand_dims(data, 0)

    pred_lens = [24, 48, 96, 288, 672]
    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, 0

def _get_time_features(dt):
    return np.stack([
        dt.minute.to_numpy(),
        dt.hour.to_numpy(),
        dt.dayofweek.to_numpy(),
        dt.day.to_numpy(),
        dt.dayofyear.to_numpy(),
        dt.month.to_numpy(),
        # dt.weekofyear.to_numpy(),
    ], axis=1).astype(np.float64)


def load_forecast_csv(name, univar=False, data_dir='d:/data/etth'):
    """Load a forecasting dataset from ``{data_dir}/{name}.csv``.

    The CSV must have a ``date`` column (parsed as the index); time-based
    covariates (minute/hour/dayofweek/day/dayofyear/month) are prepended to
    the feature columns after standardization.

    Args:
        name: dataset name; known names (ETTh*/ETTm*/electricity/WTH/M5*)
            get dataset-specific splits, columns, and prediction lengths.
        univar: if True, keep a single target column per dataset.
        data_dir: directory containing the CSV files. Defaults to the
            original hard-coded location for backward compatibility.

    Returns:
        Tuple of ``(data, train_slice, valid_slice, test_slice, scaler,
        pred_lens, n_covariate_cols)``.
    """
    data = pd.read_csv(f'{data_dir}/{name}.csv', index_col='date', parse_dates=True)
    dt_embed = _get_time_features(data.index)
    n_covariate_cols = dt_embed.shape[-1]

    if univar:
        # Pick the conventional target column for each known dataset.
        if name in ('ETTh1', 'ETTh2', 'ETTm1', 'ETTm2'):
            data = data[['OT']]
        elif name == 'electricity':
            data = data[['MT_001']]
        elif name == 'WTH':
            data = data[['WetBulbCelsius']]
        else:
            data = data.iloc[:, -1:]

    data = data.to_numpy()
    # Dataset-specific chronological train/valid/test boundaries.
    if name == 'ETTh1' or name == 'ETTh2':
        train_slice = slice(None, 12 * 30 * 24)
        valid_slice = slice(12 * 30 * 24, 16 * 30 * 24)
        test_slice = slice(16 * 30 * 24, 20 * 30 * 24)
    elif name == 'ETTm1' or name == 'ETTm2':
        train_slice = slice(None, 12 * 30 * 24 * 4)
        valid_slice = slice(12 * 30 * 24 * 4, 16 * 30 * 24 * 4)
        test_slice = slice(16 * 30 * 24 * 4, 20 * 30 * 24 * 4)
    elif name.startswith('M5'):
        train_slice = slice(None, int(0.8 * (1913 + 28)))
        valid_slice = slice(int(0.8 * (1913 + 28)), 1913 + 28)
        test_slice = slice(1913 + 28 - 1, 1913 + 2 * 28)
    else:
        # Fallback: 6:2:2 split by fraction of the series length.
        train_slice = slice(None, int(0.6 * len(data)))
        valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))
        test_slice = slice(int(0.8 * len(data)), None)

    # Fit the scaler on the training split only, transform the full series.
    scaler = StandardScaler().fit(data[train_slice])
    data = scaler.transform(data)
    # BUG FIX: `name in ('electricity')` was a substring test on a plain
    # string (e.g. name='city' matched); use equality instead.
    if name == 'electricity' or name.startswith('M5'):
        data = np.expand_dims(data.T, -1)  # Each variable is an instance rather than a feature
    else:
        data = np.expand_dims(data, 0)

    if n_covariate_cols > 0:
        # Standardize the time covariates the same way and prepend them.
        dt_scaler = StandardScaler().fit(dt_embed[train_slice])
        dt_embed = np.expand_dims(dt_scaler.transform(dt_embed), 0)
        data = np.concatenate([np.repeat(dt_embed, data.shape[0], axis=0), data], axis=-1)

    if name in ('ETTh1', 'ETTh2', 'electricity', 'WTH'):
        pred_lens = [24, 48, 168, 336, 720]
    elif name.startswith('M5'):
        pred_lens = [28]
    else:
        pred_lens = [24, 48, 96, 288, 672]

    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols


class etth_dataset(Dataset):
    """Non-overlapping window view over a preprocessed series.

    ``dataset`` is the array produced by the loaders above: either a 3-D
    ``(n_instances, T, n_features)`` array or a 2-D ``(T, n_features)``
    array. The time axis is cut into non-overlapping windows of length
    ``seq_len``; each ``__getitem__`` returns one window of shape
    ``(seq_len, feature)``. Any trailing remainder shorter than one window
    is discarded.
    """

    def __init__(self, dataset, training_mode, seq_len, patch_size, patch_nums, feature):
        self.dataset = dataset
        self.training_mode = training_mode  # kept for callers; unused here
        self.seq_len = seq_len              # length of each window
        self.patch_size = patch_size        # size of each patch inside a window
        self.patch_nums = patch_nums        # patches per patch_size group
        self.feature = feature              # number of feature columns

        self.__read_data__()

    def __read_data__(self):
        # Windows must decompose evenly into patches.
        assert self.seq_len % self.patch_size == 0, "the seq_length can't div patch_size to zero"
        assert self.patch_size % self.patch_nums == 0, "the patch_size can't div patch_nums to zero"
        window = self.seq_len
        if len(self.dataset.shape) == 3:
            # (n_instances, T, F): window along the time axis (axis 1).
            per_instance = self.dataset.shape[1] // window
            self.nums = self.dataset.shape[0] * per_instance
            # BUG FIX: the original sliced axis 0 (instances) instead of the
            # time axis, so a remainder in T broke the reshape below.
            self.dataset = self.dataset[:, :window * per_instance]
            self.dataset = self.dataset.reshape(-1, self.seq_len, self.feature)
        else:
            # 2-D (T, F): window along the time axis (axis 0).
            # BUG FIX: nums was computed from shape[1] (the feature axis) and
            # only the first window was kept; use the time axis instead.
            self.nums = self.dataset.shape[0] // window
            self.dataset = self.dataset[:window * self.nums]
            self.dataset = self.dataset.reshape(-1, self.seq_len, self.feature)

    def __len__(self):
        # Number of windows, i.e. the exclusive upper bound for __getitem__.
        return self.nums

    def __getitem__(self, index):
        # One (seq_len, feature) window per sample.
        return self.dataset[index, :, :]


# if __name__ == '__main__':
#     data, train_slice, valid_slice, test_slice, scaler, pred_lens, n_covariate_cols = load_forecast_csv('ETTh1',True)
#     train_data = data[:,train_slice]
#     print(train_data.shape)
#     dataset = etth_dataset(train_data,'train',96,24,4,7)
#     dataloader = DataLoader(dataset,batch_size = 3,drop_last=True,shuffle = False)
#     for i,d in enumerate(dataloader):
#         print(i,d.shape)
