import numpy as np
import pandas as pd
import torch
import random
import os
from torch.utils.data import DataLoader,Dataset
import torch.fft as fft
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from data.util import DataTransform_FD,DataTransform_TD

def load_forecast_npy(name, univar=False):
    """
    Load ``datasets/{name}.npy``, standardize it with train-split statistics
    and add a leading instance axis.

    Parameters
    ----------
    name : dataset file name without the '.npy' extension.
    univar : keep only the last column (target series) when True.

    Returns
    -------
    (data, train_slice, valid_slice, test_slice, scaler, pred_lens, 0)
    where ``data`` has shape (1, T, C) and the trailing 0 is the number of
    covariate columns (none for .npy datasets).
    """
    data = np.load(f'datasets/{name}.npy')
    if univar:
        # BUG FIX: was `data[: -1:]`, which drops the last ROW; univariate
        # mode should keep the last COLUMN (the target series).
        data = data[:, -1:]

    # 6:2:2 train / valid / test split.
    train_slice = slice(None, int(0.6 * len(data)))
    valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))
    test_slice = slice(int(0.8 * len(data)), None)

    # Fit the scaler on the train split only, then transform everything,
    # to avoid leaking valid/test statistics into training.
    scaler = StandardScaler().fit(data[train_slice])
    data = scaler.transform(data)
    data = np.expand_dims(data, 0)  # (T, C) -> (1, T, C)

    pred_lens = [24, 48, 96, 288, 672]
    return data, train_slice, valid_slice, test_slice, scaler, pred_lens, 0

def _get_time_feature(dt):
    return np.stack([
        dt.minute.to_numpy(),
        dt.hour.to_numpy(),
        dt.dayofweek.to_numpy(),
        dt.day.to_numpy(),
        dt.dayofyear.to_numpy(),
        dt.month.to_numpy(),
        dt.weekofyear.to_numpy(),
    ],axis=1).astype(np.float)

def load_forecast_csv(data_path, name, univar=False, SCALER=True):
    """
    Load a forecasting CSV indexed by its 'date' column and standardize it.

    Parameters
    ----------
    data_path : directory containing the CSV file.
    name : file name (e.g. 'ETTh1.csv'); also used to select dataset-specific
        splits, target columns and layout.
    univar : keep only the target column when True.
    SCALER : standardize with statistics fitted on the train split.

    Returns
    -------
    ndarray of shape (1, T, C), or (C, T, 1) for 'electricity'.

    NOTE(review): the split slices, fitted scaler, `pred_lens` and
    `n_covariate_cols` computed below are never returned (unlike
    `load_forecast_npy`) — confirm callers only need `data`.
    """
    data = pd.read_csv(f'{data_path}/{name}', index_col='date', parse_dates=True)
    # Calendar covariates derived from the index (currently unused here).
    dt_embed = _get_time_feature(data.index)
    n_covariate_cols = dt_embed.shape[-1]

    if univar:
        # NOTE(review): `name` usually carries the '.csv' suffix (see the
        # split logic below), so these exact-name branches may never match
        # and the generic `iloc[:, -1:]` fallback is taken — verify.
        if name in ('ETTh1', 'ETTh2', 'ETTm1', 'ETTm2'):
            data = data[['OT']]
        elif name == 'electricity':
            data = data[['MT_001']]
        elif name == 'WTH':
            data = data[['WetBulbCelsius']]
        else:
            data = data.iloc[:, -1:]

    data = data.to_numpy()

    # Fixed 12/4/4-month ETT splits; 6:2:2 proportional split otherwise.
    if name == 'ETTh1.csv' or name == 'ETTh2.csv':
        train_slice = slice(None, 12 * 30 * 24)
        valid_slice = slice(12 * 30 * 24, 16 * 30 * 24)
        test_slice = slice(16 * 30 * 24, 20 * 30 * 24)
    elif name == 'ETTm1' or name == 'ETTm2':
        train_slice = slice(None, 12 * 30 * 24 * 4)
        valid_slice = slice(12 * 30 * 24 * 4, 16 * 30 * 24 * 4)
        test_slice = slice(16 * 30 * 24 * 4, 20 * 30 * 24 * 4)
    else:
        train_slice = slice(None, int(0.6 * len(data)))
        valid_slice = slice(int(0.6 * len(data)), int(0.8 * len(data)))
        test_slice = slice(int(0.8 * len(data)), None)

    if SCALER:
        # Fit on the train split only to avoid test-statistics leakage.
        scaler = StandardScaler().fit(data[train_slice])
        data = scaler.transform(data)

    # BUG FIX: `if name in ('electricity')` tested substring membership in
    # the *string* 'electricity' (missing comma -> not a tuple); use
    # equality instead.
    if name == 'electricity':
        data = np.expand_dims(data.T, -1)  # one instance per series: (C, T, 1)
    else:
        data = np.expand_dims(data, 0)     # single multivariate instance: (1, T, C)

    if name in ('ETTh1', 'ETTh2', 'electricity', 'WTH'):
        pred_lens = [24, 48, 168, 336, 720]
    else:
        pred_lens = [24, 48, 96, 228, 672]

    return data

def data_generator(data_path,name,training_mode,subset = True,univar = True,scaler = True,batch_size = 32,drop_last = True,timeenc = 0,freq = 1):
    """
    Central loader: read a CSV, standardize it, build time-stamp features
    and wrap the three splits into DataLoaders.

    Returns (train_loader, test_loader, valid_loader, pred_lens).

    NOTE(review): `train_slice`/`valid_slice`/`test_slice` are only assigned
    for 'ETTh1.csv'/'ETTh2.csv' — any other `name` raises NameError below.
    `training_mode` and `subset` are accepted but never used, and the
    default `freq = 1` is an int while `time_features` expects a string —
    confirm against callers.
    """
    data = pd.read_csv(f'{data_path}/{name}', parse_dates=True)
    df_stamp = data[['date']]
    if name == 'ETTh1.csv' or name == 'ETTh2.csv':
        # Fixed ETT splits: 12 months train, 4 valid, 4 test (hourly rows).
        train_slice = slice(None, 12*30*24)
        valid_slice = slice(12*30*24,16*30*24)
        test_slice = slice(16*30*24,20*30*24)
        df_stamp = data[['date']]
    if univar:
        if name == 'ETTh1.csv' or name == 'ETTh2.csv':
            data = data['OT']  # target column only (becomes a 1-D Series)
        elif  name == 'RaCylinder':
            colt = ['SN','功率W_(x)','振动X方向W_(x)','振动Y方向W_(x)','振动Z方向W_(x)','RaCylinder'] # hole 1
            data = data[colt]
    
    # Standardize using train-split statistics only.
    # NOTE(review): the boolean `scaler` parameter is rebound to the fitted
    # StandardScaler here; expand_dims turns the 1-D series into (T, 1).
    if scaler:
        scaler = StandardScaler().fit(np.expand_dims(data[train_slice],axis=1))
        data = scaler.transform(np.expand_dims(data,axis=1))
    
    # Time-stamp feature construction.

    df_stamp['date'] = pd.to_datetime(df_stamp.date)
    if timeenc == 0:
        # Raw integer calendar features [month, day, weekday, hour].
        df_stamp['month'] = df_stamp.date.apply(lambda row: row.month, 1)
        df_stamp['day'] = df_stamp.date.apply(lambda row: row.day, 1)
        df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday(), 1)
        df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour, 1)
        data_stamp = df_stamp.drop(['date'], axis = 1).values
    elif timeenc == 1:
        # Frequency-dependent encodings in [-0.5, 0.5] (see time_features).
        data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=freq)
        data_stamp = data_stamp.transpose(1, 0)

    print(data.shape)
    # Forecast horizon lengths.
    # NOTE(review): `name` still carries the '.csv' suffix here, so the
    # first branch never matches for the ETT files — verify intent.
    # NOTE(review): `data_stamp` computed above is never used below.
    if name in ('ETTh1','ETTh2','electricity','WTH'):
            pred_lens = [24,48,168,336,720]
    else:
        pred_lens = [24,48,96,228,672]
    # Window each split into (time-domain, frequency-domain) pairs.
    # 24*4*4 is the window length; 0.1 is the jitter strength.
    # NOTE(review): DataUtil only augments when its mode == 'pre_train',
    # which "train"/"test"/"valid" never equal — confirm intended.
    train_set = DataUtil(data[train_slice],"train",24*4*4,0.1)
    test_set = DataUtil(data[test_slice],"test",24*4*4,0.1)
    valid_set = DataUtil(data[valid_slice],"valid",24*4*4,0.1)

    train_loader = DataLoader(train_set,batch_size = batch_size,shuffle = True,drop_last = drop_last)
    test_loader = DataLoader(test_set,batch_size = batch_size,shuffle = True,drop_last = drop_last)
    valid_loader = DataLoader(valid_set,batch_size = batch_size,shuffle = True,drop_last = drop_last)
    return train_loader,test_loader,valid_loader,pred_lens

class DataUtil(Dataset):
    """
    Sliding-window dataset yielding (time-domain, frequency-domain) pairs.

    Each item is a window of `seq_len` consecutive rows of the input plus the
    corresponding window of the magnitude spectrum (FFT of the whole series,
    computed once up front). In 'pre_train' mode both domains are augmented
    via DataTransform_TD / DataTransform_FD.
    """

    def __init__(self, dataset, training_mode, seq_len, jitter):
        """
        dataset : array-like of shape (T, C) time series.
        training_mode : augmentation is applied only when == 'pre_train'.
        seq_len : window length returned per item.
        jitter : jitter strength forwarded to the time-domain augmentation.
        """
        super(DataUtil, self).__init__()
        self.training_mode = training_mode
        self.data = dataset
        self.seq_len = seq_len
        # Magnitude spectrum of the full series (FFT along the last axis).
        self.data_f = fft.fft(torch.Tensor(self.data)).abs()
        # Time- and frequency-domain augmentation for self-supervised pretraining.
        if training_mode == 'pre_train':
            self.data = DataTransform_TD(self.data, jitter)
            self.data_f = DataTransform_FD(self.data_f)
        print(self.data.shape, self.data_f.shape)

    def __getitem__(self, index):
        # Window [index, index + seq_len) taken from both domains.
        start_index = index
        end_index = index + self.seq_len
        return self.data[slice(start_index, end_index)], self.data_f[slice(start_index, end_index)]

    def __len__(self):
        # BUG FIX: was hard-coded to 270, which yields short/ragged windows
        # (and collate failures) whenever the split has fewer than
        # 270 + seq_len - 1 rows. Return the number of full-length windows.
        return max(0, len(self.data) - self.seq_len + 1)

class PretrainDataset(Dataset):
    """Serve two random augmentations of the same series per item.

    Wraps a tensor of shape (N, T, D) — N series of length T with D
    channels. Each of the three augmentations (scale, shift, jitter) fires
    independently with probability `p` and noise strength `sigma`;
    `multiplier` virtually repeats the dataset so one epoch revisits each
    series several times.
    """

    def __init__(self,
                 data,
                 sigma=0.5,
                 p=0.5,
                 multiplier=10):
        super().__init__()
        self.data = data
        self.p = p
        self.sigma = sigma
        self.multiplier = multiplier
        # N: number of series, T: series length, D: channel dimension.
        self.N, self.T, self.D = data.shape

    def __getitem__(self, item):
        series = self.data[item % self.N]
        # Two independently sampled views of the same underlying series.
        return self.transform(series), self.transform(series)

    def __len__(self):
        return self.data.size(0) * self.multiplier

    def transform(self, x):
        # Augmentation pipeline: scale -> shift -> jitter.
        return self.jitter(self.shift(self.scale(x)))

    def jitter(self, x):
        # Element-wise Gaussian noise, applied with probability p.
        if random.random() <= self.p:
            return x + torch.randn(x.shape) * self.sigma
        return x

    def scale(self, x):
        # Per-channel multiplicative noise centered at 1, with probability p.
        if random.random() <= self.p:
            return x * (torch.randn(x.size(-1)) * self.sigma + 1)
        return x

    def shift(self, x):
        # Per-channel additive offset, applied with probability p.
        if random.random() <= self.p:
            return x + torch.randn(x.size(-1)) * self.sigma
        return x

def time_features(dates, timeenc=1, freq='h'):
    """
    Extract time-stamp features from a frame with a 'date' column.

    With ``timeenc == 0`` the features are raw integers selected by `freq`:
      * m / w -> [month]
      * d / b -> [month, day, weekday]
      * h     -> [month, day, weekday, hour]
      * t     -> [month, day, weekday, hour, minute]
    where minute is the 15-minute period index (0-3).

    With ``timeenc == 1`` the features are frequency-dependent encodings in
    [-0.5, 0.5] produced by `time_features_from_frequency_str`.

    NOTE: when timeenc == 0 this mutates `dates` in place by adding the
    feature columns.
    """
    if timeenc == 0:
        # BUG FIX: the old calls passed `1` positionally as Series.apply's
        # `convert_dtype` argument, which is deprecated/removed in modern
        # pandas; the default behavior is identical without it.
        dates['month'] = dates.date.apply(lambda row: row.month)
        dates['day'] = dates.date.apply(lambda row: row.day)
        dates['weekday'] = dates.date.apply(lambda row: row.weekday())
        dates['hour'] = dates.date.apply(lambda row: row.hour)
        dates['minute'] = dates.date.apply(lambda row: row.minute)
        dates['minute'] = dates.minute.map(lambda x: x // 15)  # 15-min bucket
        freq_map = {
            'y': [], 'm': ['month'], 'w': ['month'],
            'd': ['month', 'day', 'weekday'],
            'b': ['month', 'day', 'weekday'],
            'h': ['month', 'day', 'weekday', 'hour'],
            't': ['month', 'day', 'weekday', 'hour', 'minute'],
        }
        return dates[freq_map[freq.lower()]].values
    if timeenc == 1:
        # NOTE(review): time_features_from_frequency_str is not defined in
        # this module — this branch raises NameError unless it is supplied
        # elsewhere (it originates from the gluonts/Informer utilities).
        dates = pd.to_datetime(dates.date.values)
        return np.vstack([feat(dates) for feat in time_features_from_frequency_str(freq)]).transpose(1, 0)

def batch_x_ffts(batch_x_patch, device):
    """
    FFT of a patched batch along its last axis.

    Parameters
    ----------
    batch_x_patch : torch tensor of shape (batch, var, patch, patch_len)
        — assumed layout, TODO confirm against callers.
    device : target device for the returned complex tensor.

    Returns
    -------
    Complex torch tensor of the same shape, on `device`.

    The original looped over the `var` axis and concatenated the results;
    np.fft.fft over the last axis of the full array produces the identical
    result in one vectorized call (no repeated torch.cat reallocations).
    """
    spectrum = np.fft.fft(batch_x_patch.cpu().numpy(), axis=-1)
    return torch.from_numpy(spectrum).to(device)


class Dataset_ETT_hour(Dataset):
    """
    Informer-style hourly ETT dataset.

    Reads `{root_path}/{data_path}` once, splits it into the fixed
    12/4/4-month train/val/test borders, optionally standardizes with
    train statistics, and serves (seq_x, seq_y, seq_x_mark, seq_y_mark)
    windows for encoder-decoder forecasting.
    """
    def __init__(self, root_path, flag='train', size=None, 
                 features='S', data_path='ETTh1.csv', 
                 target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
        # size [seq_len, label_len, pred_len]
        # info
        if size == None:
            # Defaults: 16-day input window, 4-day label overlap, 4-day horizon.
            self.seq_len = 24*4*4
            self.label_len = 24*4
            self.pred_len = 24*4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train':0, 'val':1, 'test':2}
        self.set_type = type_map[flag]
        
        # features: 'M'/'MS' = all value columns, 'S' = target column only.
        self.features = features
        self.target = target
        self.scale = scale
        # inverse: serve unscaled targets as seq_y (scaled inputs either way).
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

    def __read_data__(self):
        """Load the CSV, slice the split borders, scale, and build stamps."""
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))

        # Split borders in rows (hourly): train = first 12 months,
        # val = next 4, test = next 4. Each split starts seq_len early so
        # its first window has a full input history.
        border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]
        border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        
        if self.features=='M' or self.features=='MS':
            # All columns except the leading 'date' column.
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features=='S':
            df_data = df_raw[[self.target]]

        if self.scale:
            # Fit on the train split only, transform the full series.
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values
            
        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)

        self.data_x = data[border1:border2]
        # NOTE(review): data_fft_x is an exact duplicate of data_x — no FFT
        # is applied despite the name; confirm whether a transform is missing.
        self.data_fft_x = data[border1:border2]
        
        if self.inverse:
            self.data_y = df_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    
    def __getitem__(self, index):
        """Return one (input, target, input-stamps, target-stamps) window."""
        # Input window [s_begin, s_end); target window overlaps the last
        # label_len input steps and extends pred_len steps beyond them.
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len 
        r_end = r_begin + self.label_len + self.pred_len

        seq_x = self.data_x[s_begin:s_end]
        if self.inverse:
            # Label part stays scaled; the forecast part is unscaled.
            seq_y = np.concatenate([self.data_x[r_begin:r_begin+self.label_len], self.data_y[r_begin+self.label_len:r_end]], 0)
        else:
            seq_y = self.data_y[r_begin:r_end]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark
    
    def __len__(self):
        # Number of complete (input + horizon) windows in the split.
        return len(self.data_x) - self.seq_len- self.pred_len + 1

    def inverse_transform(self, data):
        """Map scaled values back to the original units."""
        return self.scaler.inverse_transform(data)



# if __name__ == '__main__':
#     train_loader,test_loader,valid_loader,pre_lens = data_generator(data_path = "d:\\data\\etth",name = "ETTh1.csv",training_mode = "pre_train")
#     for i, (batch_x,batch_x_f) in enumerate(valid_loader):
#         if i > 3: break
#         print(batch_x.shape,batch_x_f.shape)
#         print(batch_x, batch_x_f)
    



        
        
        

    




