import pandas as pd
import numpy as np
import paddle
import paddle.fluid as fluid
import numpy as np
import pickle
from tqdm import tqdm
import matplotlib.pyplot as plt
# --- configuration ---
normalization = 1   # normalization method: 0 = min-max, 1 = z-score
# network selection: 0 = fully-connected, 1 = LSTM
flag = 1
day_num = 1         # number of history days used per input sample
# training hyper-parameters
BATCH_SIZE = 5
learning_rate = 0.001
EPOCH_NUM = 200

# number of days to visualize in the final plot
date_n = 7

# Read the six quantities from Excel (one sheet each): temperature, humidity,
# wind speed, rainfall, load, sunshine; columns 1..96 are the 96 daily samples.
train_set = []
test_set = []
# BUG FIX: range(5) only loaded 5 sheets, but the header comment lists six
# quantities and data_pro() indexes train_set[5] (sunshine), which raised
# IndexError — load all 6 sheets.
for i in range(6):
    sheet = pd.read_excel('work/Load_test.xlsx',
            sheet_name = i, usecols = range(1, 97))
    train_set.append(sheet[0:365].values)        # first 365 days -> training
    test_set.append(sheet[365:365+31].values)    # following 31 days -> test
print(train_set[0].shape)
print(test_set[0].shape)

#*********
def data_pro(train_set, test_set, day_num):
    """Build supervised-learning arrays from the raw daily sheets.

    Each element of train_set/test_set is a (days, 96) array of one quantity
    sampled 96 times per day, ordered: 0 temperature, 1 humidity, 2 wind
    speed, 3 rainfall, 4 load, 5 sunshine (assumes six sheets — see loader).

    The label for a day is its 96-point load curve (sheet 4); the input is
    the concatenation of all six quantities over the preceding day_num days.

    Returns (train_x, train_y, test_x, test_y).
    """
    # Training labels: load curves, skipping the first day_num days that
    # have no complete history window.
    train_y = train_set[4][day_num:, :]
    print(train_y.shape)

    # Feature matrix: all six quantities side by side (days, 6*96).
    train_x_temp = np.concatenate(
        (train_set[0], train_set[1], train_set[2],
         train_set[3], train_set[4], train_set[5]), axis=1)
    n_train = train_x_temp.shape[0]  # generalizes the hard-coded 365
    # Sliding window: day offsets 0..day_num-1 concatenated per sample.
    temp = [train_x_temp[x:(n_train - day_num + x), :] for x in range(day_num)]
    train_x = np.concatenate(temp, axis=1)
    print(train_x.shape)

    # BUG FIX: the test labels must come from the load sheet (index 4) to
    # match the training labels; the old code used test_set[0 - day_num],
    # which selects the load only by coincidence for some day_num values.
    test_y = test_set[4][day_num:, :]
    print(test_y.shape)

    # Test inputs, built exactly like the training inputs.
    test_x_temp = np.concatenate(
        (test_set[0], test_set[1], test_set[2],
         test_set[3], test_set[4], test_set[5]), axis=1)
    n_test = test_x_temp.shape[0]  # generalizes the hard-coded 31
    temp = [test_x_temp[x:(n_test - day_num + x), :] for x in range(day_num)]
    test_x = np.concatenate(temp, axis=1)
    print(test_x.shape)

    return train_x, train_y, test_x, test_y

train_x, train_y, test_x, test_y = data_pro(train_set=train_set, test_set=test_set, day_num=day_num)

# --- feature scaling ---
# Each scaler is fit on the training data only; the returned statistics are
# reused to transform the test data and, later, to undo the scaling on the
# model predictions.
if normalization == 0:
    def normalization_Maxmin(train_x):
        """Min-max scale each column to [0, 1]; returns (scaled, Min, Max)."""
        Min = np.min(train_x, axis=0)
        Max = np.max(train_x, axis=0)
        train_x = (train_x - Min) / (Max - Min)
        # Columns with Max == Min divide by zero; clamp NaN/Inf to 0.
        train_x[np.isnan(train_x)] = 0
        train_x[np.isinf(train_x)] = 0
        return train_x, Min, Max

    train_x, Min_x, Max_x = normalization_Maxmin(train_x)
    test_x = (test_x - Min_x) / (Max_x - Min_x)
    train_y, Min_y, Max_y = normalization_Maxmin(train_y)
    test_y = (test_y - Min_y) / (Max_y - Min_y)
    # BUG FIX: the scaled test data was never cleaned, so constant training
    # columns (Max == Min) injected NaN/Inf into the test set while the
    # training set was clamped — clamp the test arrays the same way.
    for arr in (test_x, test_y):
        arr[np.isnan(arr)] = 0
        arr[np.isinf(arr)] = 0
else:
    def normalization_zscore(train_x):
        """Z-score each column; returns (scaled, mu, sigma)."""
        mu = np.mean(train_x, axis=0)
        sigma = np.std(train_x, axis=0)
        train_x = (train_x - mu) / sigma
        # Columns with sigma == 0 divide by zero; clamp NaN/Inf to 0.
        train_x[np.isnan(train_x)] = 0
        train_x[np.isinf(train_x)] = 0
        return train_x, mu, sigma

    train_x, mu_x, sgma_x = normalization_zscore(train_x)
    test_x = (test_x - mu_x) / sgma_x
    train_y, mu_y, sgma_y = normalization_zscore(train_y)
    test_y = (test_y - mu_y) / sgma_y
    # BUG FIX: clamp NaN/Inf in the test data (sigma == 0 columns), matching
    # the treatment of the training data.
    for arr in (test_x, test_y):
        arr[np.isnan(arr)] = 0
        arr[np.isinf(arr)] = 0

#****
def Full_connected(input):
    """4-hidden-layer MLP (50 ReLU units each) mapping a flat feature vector
    to a 96-point daily load curve (linear output)."""
    hidden1 = fluid.layers.fc(input=input, size=50, act='relu')
    hidden2 = fluid.layers.fc(input=hidden1, size=50, act='relu')
    hidden3 = fluid.layers.fc(input=hidden2, size=50, act='relu')
    hidden4 = fluid.layers.fc(input=hidden3, size=50, act='relu')
    # BUG FIX: the output layer was wired to hidden2, leaving hidden3 and
    # hidden4 as dead (unused) layers; connect it to hidden4 as intended.
    prediction = fluid.layers.fc(input=hidden4, size=96, act=None)
    return prediction

def LSTM_pre(x):
    """LSTM regressor: project the input sequence to 16 dims, run a dynamic
    LSTM, max-pool over time, apply tanh, and map to a 96-point prediction."""
    projected = fluid.layers.fc(input=x, size=16)
    hidden_seq, _cell = fluid.layers.dynamic_lstm(input=projected, size=16, is_reverse=False)
    # Collapse the variable-length sequence to a fixed vector via max pooling.
    pooled = fluid.layers.sequence_pool(input=hidden_seq, pool_type='max')
    activated = fluid.layers.tanh(pooled)
    # Linear output layer: one value per 15-minute slot of the day.
    return fluid.layers.fc(input=activated, size=96, act=None)

#****
def readers(x, y):
    """Return a paddle-style reader that yields (x[i], y[i]) sample pairs."""
    def reader():
        for idx, sample in enumerate(x):
            yield sample, y[idx]
    return reader

#****-
# Training reader: shuffle within a 300-sample buffer, then batch.
trainer_reader = readers(train_x, train_y)
train_reader = paddle.batch(
    paddle.reader.shuffle(
        reader=trainer_reader, buf_size=300),
    batch_size=BATCH_SIZE)

paddle.enable_static()

# Build the static graph for the selected network.
if flag == 0:
    # Fully-connected net: one flat feature vector per sample.
    # FIX: the input width was hard-coded as 480*day_num (5 sheets x 96),
    # which disagrees with the six sheets data_pro() concatenates; derive
    # it from the actual training matrix instead.
    x = fluid.layers.data(name='x', shape=[train_x.shape[1]], dtype='float32')
    y = fluid.layers.data(name='y', shape=[96], dtype='float32')

    pred = Full_connected(x)
else:
    # LSTM: variable-length sequence of scalars (lod_level=1).
    x = fluid.layers.data(name='x', shape=[1], dtype='float32', lod_level=1)
    y = fluid.layers.data(name='y', shape=[96], dtype='float32')

    pred = LSTM_pre(x)

# Mean squared error between the predicted and true 96-point load curves.
cost = fluid.layers.square_error_cost(input=pred, label=y)
avg_cost = fluid.layers.mean(cost)

optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
opts = optimizer.minimize(avg_cost)

#****-
# Evaluation clone of the graph; NOTE(review): test_program is never used
# afterwards — inference below reloads the saved model instead.
test_program = fluid.default_main_program().clone(for_test=True)
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Converts a batch of (x, y) numpy pairs into the graph's feed dict.
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])

losses = []
for pass_id in range(EPOCH_NUM):
    loss = []
    for batch_id, data in enumerate(train_reader()):
        # One optimizer step; fetch the batch's mean squared error.
        train_cost = exe.run(program=fluid.default_main_program(), feed=feeder.feed(data), fetch_list=[avg_cost])   
        loss.append(train_cost[0][0]) 
    losses.append(np.mean(loss))
    if pass_id % 10==0:
        print('Epoch:', pass_id, 'loss:', np.mean(loss))

# Persist the trained inference graph (input 'x' -> pred) under ./pd.
fluid.io.save_inference_model('./pd', ['x'], [pred], exe)

# Plot the mean training loss per epoch.
plt.figure(dpi=50,figsize=(24,8))
plt.plot(range(EPOCH_NUM), losses)
plt.xlabel('epoch')
plt.ylabel('Loss')
plt.show()
#****--
# Separate executor and scope for inference, so the saved parameters are
# loaded in isolation from the training program's variables.
infer_exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
def convert2LODTensor(temp_arr, len_list):
    """Flatten a list of per-sample arrays into a single (N, 1) column and
    wrap it as a LoD tensor whose sequence lengths are len_list."""
    flat = np.array(temp_arr)
    flat = flat.flatten().reshape((-1, 1))
    print(flat.shape)
    return fluid.create_lod_tensor(
        data=flat,
        recursive_seq_lens=[len_list],
        place=fluid.CPUPlace(),
    )
    
def get_tensor_label(mini_batch):
    """Split a mini-batch of (sample, label) pairs into one LoD tensor of all
    samples plus the list of labels; returns (tensor, labels)."""
    labels = [pair[1] for pair in mini_batch]
    samples = [pair[0] for pair in mini_batch]
    # Per-sample lengths define the LoD (sequence) boundaries.
    lengths = [len(pair[0]) for pair in mini_batch]
    tensor = convert2LODTensor(samples, lengths)
    return tensor, labels


if flag==0:
    # FC net: materialize the whole test set as one dense batch.
    tester_reader = readers(test_x, test_y)
    test_reader = paddle.batch(tester_reader, batch_size=31)
    test_data = next(test_reader())
    test_x = np.array([data[0] for data in test_data]).astype("float32")
    test_y= np.array([data[1] for data in test_data]).astype("float32")
else: # data preparation for the LSTM network
    # LSTM: pack the whole test set into one LoD tensor of flat sequences;
    # only the first (and only) mini-batch is used.
    test_x = test_x.astype("float32")
    test_y= test_y.astype("float32")
    tester_reader = readers(test_x, test_y)
    test_reader = paddle.batch(tester_reader, batch_size=31-day_num)
    for mini_batch in test_reader():
        test_x,labels = get_tensor_label(mini_batch)
        break
    
print(test_x.shape)
with fluid.scope_guard(inference_scope):
    # Reload the model saved after training and run it on the test inputs.
    [inference_program, feed_target_names, fetch_targets] =\
        fluid.io.load_inference_model('./pd', infer_exe)
    results = infer_exe.run(inference_program,
        feed={feed_target_names[0]: test_x},
        fetch_list=fetch_targets)
#****----
# Undo the normalization on both predictions and ground truth so the plot
# and error metrics are in the original load units.
if normalization == 0:
    results = (Max_y - Min_y) * results + Min_y
    test_y = (Max_y - Min_y) * test_y + Min_y
else:
    results = results * sgma_y + mu_y
    test_y = test_y * sgma_y + mu_y
print(test_y.shape)

# Flatten both curves to one long time series for plotting.
y1 = np.reshape(results, (1, test_y.shape[0] * test_y.shape[1]))
y2 = np.reshape(test_y, (1, test_y.shape[0] * test_y.shape[1]))
print(y1[0, :].shape)
plt.figure(dpi=50, figsize=(24, 8))
# BUG FIX: the legend labels were swapped — y1 is built from `results`
# (the model output) and y2 from `test_y` (the ground truth).
plt.plot(range(date_n * 96), y1[0, :date_n * 96], color='r', label='Predict')
plt.plot(range(date_n * 96), y2[0, :date_n * 96], color='g', label='Real')
plt.xlabel('Time', fontsize=20)
plt.ylabel('Load', fontsize=20)
plt.legend()
plt.show()

print(results.shape)

s = "预测数据---"

# Mean absolute percentage error and per-point absolute error.
mape = np.mean(np.abs((results - test_y) / test_y)) * 100
error = abs(results - test_y)

print(results.shape)
# Reshape everything to (days, 96) for any downstream per-day analysis.
test_y.resize((31 - day_num, 96))
results.resize((31 - day_num, 96))
error.resize((31 - day_num, 96))



