'''
Dataset loaders for 27 new-energy stations (14 wind farms, 13 PV stations):
two years (2019-2020) of historical power output, operation records,
measured meteorological data and weather-forecast data.

input
    weather-forecast data
    weather observation data
    wind-farm power output
    wind-turbine data
    PV-station power output
    PV-unit data
    AGC limit adjustment data
output
    new-energy power output for the next 7 days (station-level output series)
'''


import os
import glob
abspath = os.path.abspath('../..')
from torch.utils.data import Dataset
import re
import time
import numpy as np
import pandas as pd
from collections import OrderedDict

from utils import tools

# Root directory of the raw competition dataset (machine-specific absolute path).
data_dir = '/home/kexin/phdwork/work4-tii/data/raw_windpower/dataset'
# Station identifiers: 14 wind farms (JSFD) and 13 PV stations (JSGF).
JSFD = ('JSFD001','JSFD002','JSFD003','JSFD004','JSFD005','JSFD006','JSFD007','JSFD008','JSFD009','JSFD010','JSFD011','JSFD012','JSFD013','JSFD014')
JSGF = ('JSGF001','JSGF002','JSGF003','JSGF004','JSGF005','JSGF006','JSGF007','JSGF008','JSGF009', 'JSGF010','JSGF011','JSGF012','JSGF013')
# Raw Excel attachment names (operation records / station output / wind measurement).
data_type = ('附件5-运行记录.xlsx', '附件2-场站出力.xlsx', '附件3-测风数据.xlsx')
# Column schema for the wind-farm meteorological CSV: wind speed/direction at
# several hub heights plus temperature, pressure and humidity.
column_name_wind = ['time', 'windspeed_10m', 'winddirection_10m', 'windspeed_30m', 'winddirection_30m', 'windspeed_50m',
                       'winddirection_50m', 'windspeed_70m', 'winddirection_70m',
                       'windspeed_fan', 'winddirection_fan', 'temperature', 'pressure', 'humidity']
# Column schema for the PV-station irradiance CSV.
column_name_light = ['time', 'total_irradiance', 'verticalirradiance', 'horizontalirradiance',
                     'temperature', 'pressure', 'humidity']

def loaddata_weather(station_name='JSFD001'):
    """Load the historic weather-forecast CSVs for one station.

    Scans every CSV under
    ``<data_dir>/weather_data/cepri_historic_2019010112_2020123112_<station>_<station>/``
    and returns a mapping from the date tag embedded in each file name to a
    2-D numpy array of that file's whitespace-separated fields ("27-dim" data).

    Parameters
    ----------
    station_name : str
        Station identifier, e.g. 'JSFD001'.

    Returns
    -------
    OrderedDict[str, np.ndarray]
        One entry per file, keyed by its 8-character date tag, in sorted
        file-name order.
    """
    DATA = OrderedDict()
    weatherpath = 'weather_data/cepri_historic_2019010112_2020123112_' + station_name + '_' + station_name
    datapath = os.path.join(data_dir, weatherpath, '*.csv')
    filelist = sorted(glob.glob(datapath))
    for file in filelist:
        # The date tag occupies a fixed slice of the file name
        # (assumes the cepri naming scheme above — TODO confirm).
        datetag = file[-19: -11]
        data = pd.read_csv(file).values
        # Each CSV row is one whitespace-delimited string; split it and
        # drop the empty tokens produced by runs of spaces.
        rows = [x[0].split(' ') for x in data]
        temp = []
        for d in rows:
            fields = np.array(d)
            fields = fields[np.where(fields != '')].reshape(1, -1)
            temp.append(fields)
        DATA[datetag] = np.concatenate(temp, axis=0)
    # BUG FIX: the original `return` sat inside the for loop, so only the
    # first file was ever loaded; return after processing every file.
    # (Also removed a leftover debug print of len(temp).)
    return DATA

def loaddata_power(
        root_path,
        data_path,
        data_name='JSFD001'):
    """Load power output and matching meteorological features for a station.

    Expects two CSVs under ``<root_path>/<data_path>/<data_name>/`` whose
    sorted order is:
        0: '附件2-场站出力.csv'  (station power output)
        1: '附件3-测风数据.csv'  (wind measurement for wind farms) or the
           irradiance file (PV stations)

    Parameters
    ----------
    root_path, data_path : str
        Joined with `data_name` to form the station directory.
    data_name : str
        Station id; must appear in JSFD (wind) or JSGF (PV).

    Returns
    -------
    (time_idx, x, y)
        time_idx : pandas Index from the power file;
        x : DataFrame of meteorological/irradiance features;
        y : DataFrame with the single 'power' column.

    Raises
    ------
    ValueError
        If `data_name` is not a known wind or PV station id.
    """
    datapath = os.path.join(root_path, data_path, data_name, '*.csv')
    datalist = sorted(glob.glob(datapath))

    # Pick the feature schema by station type. BUG FIX: an unknown station
    # id previously fell through both branches and crashed with a NameError
    # on the return statement; fail fast with a clear error instead.
    if data_name in JSFD:
        feature_columns = column_name_wind
    elif data_name in JSGF:
        feature_columns = column_name_light
    else:
        raise ValueError('Unknown station name: %r (expected a JSFD/JSGF id)' % (data_name,))

    column_name_power = ['time', 'power']
    powerdata = pd.read_csv(datalist[0], skiprows=1, names=column_name_power, index_col=0)
    featuredata = pd.read_csv(datalist[1], skiprows=1, names=feature_columns, index_col=0, low_memory=False)
    assert len(powerdata) == len(featuredata), 'Check Data, length of powerdata is not equal to feature data'

    time_idx = powerdata.index
    return time_idx, featuredata, powerdata


def split_data(x, input_length, output_length, split_step):
    """Cut a 2-D sequence into sliding-window (input, target) samples.

    Sample k is ``(x[k:k+input_length, :], x[k+input_length:k+input_length+output_length, -1])``
    — the target is the LAST column of the window immediately following the
    input window.

    Parameters
    ----------
    x : np.ndarray of shape (n, m)
        Feature matrix whose last column is the forecast target.
    input_length : int
        Length of the input window.
    output_length : int
        Length of the target window.
    split_step : int
        Stride between consecutive sample start indices.

    Returns
    -------
    (list[tuple[np.ndarray, np.ndarray]], int)
        The sample list and its length.
    """
    n, m = x.shape
    # BUG FIX: `range(stop)` already excludes its stop value, so the
    # original `n - output_length - input_length` silently dropped the last
    # fully-valid window; `+ 1` keeps every window that fits in the data.
    n_itr = n - output_length - input_length + 1
    datalist = []
    for k in range(0, n_itr, split_step):
        sample = (x[k:k + input_length, :],
                  x[k + input_length: k + input_length + output_length, -1])
        datalist.append(sample)
    return datalist, len(datalist)



class Dataset_windpower(Dataset):
    """Sliding-window dataset of station power output for forecasting.

    Loads one station's power + meteorological CSVs via ``loaddata_power``,
    appends the power column as the LAST feature column (``split_data``
    takes column -1 as the forecast target), splits the sequence 70/30
    chronologically into TRAIN/TEST, standardizes it and cuts it into
    (input_window, target_window) samples.

    Parameters
    ----------
    root_path, data_path, data_name : str
        Passed through to ``loaddata_power``.
    flag : str
        'TRAIN' or 'TEST'; selects which part of the 70/30 split to expose.
    config : object
        Namespace-like object providing input_length, output_length,
        split_step and input_cols attributes.
    """

    def __init__(self,
                 root_path,
                 data_path,
                 data_name,
                 flag='TRAIN',
                 config=None
                 ):
        # BUG FIX: validate early — an unrecognized flag previously caused
        # an UnboundLocalError on `border1` deep inside __read_data.
        if flag not in ('TRAIN', 'TEST'):
            raise ValueError("flag must be 'TRAIN' or 'TEST', got %r" % (flag,))
        self.root_path = root_path
        self.data_path = data_path
        self.data_name = data_name
        self.flag = flag
        self.scale = True  # always standardize (no config switch yet)

        # Windowing configuration (see split_data).
        self.input_length = config.input_length
        self.split_step = config.split_step
        self.input_cols = config.input_cols
        self.output_length = config.output_length

        self.__read_data()

    def __read_data(self):
        """Load, split 70/30, standardize and window the sequence."""
        self.scaler = tools.StandardScaler()
        time_idx, x, y = loaddata_power(
            root_path=self.root_path,
            data_path=self.data_path,
            data_name=self.data_name
        )
        # Select feature columns; the sentinel ['all'] keeps every column.
        if self.input_cols == ['all']:
            x = x.values
        else:
            x = x[self.input_cols].values
        y = y.values
        # Power goes LAST so split_data's column -1 is the target.
        xy = np.concatenate((x, y), axis=1)

        # Chronological 70/30 train/test split.
        # NOTE(review): the TEST split starts at num_train + 1, so the row
        # at index num_train belongs to neither split — confirm intended.
        num_train = int(len(xy) * 0.7)
        border1s = [0, num_train + 1]
        border2s = [num_train, len(xy)]
        if self.flag == 'TRAIN':
            border1 = border1s[0]
            border2 = border2s[0]
        else:  # 'TEST' (validated in __init__)
            border1 = border1s[1]
            border2 = border2s[1]
        xy = xy[border1:border2, :]

        if self.scale:
            # NOTE(review): the scaler is fitted on whichever split was
            # selected, so TEST data is normalized with its own statistics
            # instead of the training statistics — possible train/test
            # mismatch; verify against the training pipeline.
            self.scaler.fit(xy)
            data = self.scaler.transform(xy)
        else:
            data = xy

        self.datasample, self.N = split_data(
            x=data,
            input_length=self.input_length,
            output_length=self.output_length,
            split_step=self.split_step)

    def __getitem__(self, index):
        # Each item is an (input_window, target_window) array pair.
        return self.datasample[index]

    def __len__(self):
        return self.N



if __name__ == '__main__':
    # Smoke test: parse CLI options, build a TRAIN dataset for one station
    # and print basic shape information.
    import argparse

    cli = argparse.ArgumentParser(description='example')

    cli.add_argument('--root_path', type=str, default='../../data', help='root path of the data file')
    cli.add_argument('--data_name', type=str, default='JSFD001', help='data_name')
    cli.add_argument('--data_path', type=str, default='Windpower/raw_windpower/dataset',
                     help='Monash/Monash_UEA_UCR_Regression_Archive -> [AppliancesEnergy]'
                          'Windpower/raw_windpower/dataset -> [JSDF001]'
                          'Isdb/raw_isdb/')
    cli.add_argument('--station_name', type=str, default='JSFD001', help='station_name')
    cli.add_argument('--split_step', type=int, default=8, help='Index Difference between each training')
    cli.add_argument('--input_length', type=int, default=960, help='input sequence length of forecasting model')
    cli.add_argument('--input_cols', default=['all'], help='which features to use, default [all] or'
                                                                  'a list includs col_name, such as [feat1, feat2, ...]')
    cli.add_argument('--output_length', type=int, default=672, help='output sequence length of forecasting model')

    cli_args = cli.parse_args()
    print(cli_args)

    train_set = Dataset_windpower(
        root_path=cli_args.root_path,
        data_path=cli_args.data_path,
        data_name=cli_args.data_name,
        flag='TRAIN',
        config=cli_args)
    print(train_set)
    print(len(train_set))

    first_x, first_y = train_set[0]
    print(first_x.shape)
    print(first_y.shape)
    # First timestep, 14th feature of the first sample's input window.
    print(first_x[0][13])















