import csv
import math
import os
import random
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.layers.core import Dense, Dropout
from keras.layers.recurrent import LSTM
from keras.losses import mean_squared_error
from sklearn.metrics import mean_absolute_error
from keras.models import Sequential
from sklearn import preprocessing

from PSO_1 import PSO

# Load the univariate series 'A3' and shape it as a (samples, 1) column.
data = pd.read_csv('A3.csv')
data = data['A3']
data = data.values.reshape(len(data), 1)
print(data.shape)

# Each sample is a window of `seq_length` steps used to predict the value
# `delay` steps after the window.
seq_length = 20    # look-back window length (edit here)
delay = 1
# Direct sliding-window sampling.
windows = [data[i: i + seq_length + delay] for i in range(len(data) - seq_length)]
data_ = np.array(windows)
print(data_.shape)
x = data_[:, :seq_length, :]
y = data_[:, -1, 0]
# Chronological 90/10 split into train and test sets.
split_b = int(data_.shape[0] * 0.9)
X_train = x[:split_b]
y_train = y[:split_b]
X_test = x[split_b:]
y_test = y[split_b:]
# Standardise features with train-set statistics; labels stay unscaled.
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)

def model_test_score(model, X_test, y_test):
    """Print and return (RMSE, MAE) of *model*'s predictions on the test split.

    Args:
        model: trained estimator exposing ``predict(X_test)``; expected to
            return an (n, 1) column of predictions.
        X_test: test inputs, passed through to ``model.predict``.
        y_test: 1-D array of test labels; reshaped to a column here.

    Returns:
        Tuple ``(rmse, mae)`` as Python floats.
    """
    y_hat = model.predict(X_test)
    y_test = y_test.reshape(len(y_test), 1)
    temp = pd.DataFrame({'yhat': np.asarray(y_hat).ravel(), 'y': y_test.ravel()})
    # BUG FIX: this module imports `mean_squared_error` from keras.losses
    # (not sklearn.metrics), which returns a tensor-valued loss rather than
    # the scalar metric intended here — compute both metrics with numpy.
    temp_rmse = float(np.sqrt(np.mean((temp.y - temp.yhat) ** 2)))
    temp_mae = float(np.mean(np.abs(temp.y - temp.yhat)))
    print('test RMSE: %.6f' % temp_rmse)
    print('test MAE: %.6f' % temp_mae)
    return temp_rmse, temp_mae


def model_score(model, X_train, y_train):
    """Print the training-set MSE/RMSE and return the MSE.

    ``model.evaluate`` returns ``[loss, metric, ...]``; only the loss
    (index 0, the 'mse' the model was compiled with) is used.
    """
    loss, *_metrics = model.evaluate(X_train, y_train, verbose=0)
    print('Train Score: %.6f MSE (%.6f RMSE)' % (loss, math.sqrt(loss)))
    return loss


def writeOneCsv(relate_record, src):
    """Append a single row *relate_record* to the CSV file at path *src*."""
    with open(src, 'a', newline='\n') as fh:
        csv.writer(fh).writerow(relate_record)


def build_model(neurons, d, timesteps=20, features=1):
    """Build and compile a single-layer LSTM regressor.

    Args:
        neurons: number of units in the LSTM layer.
        d: dropout rate applied after the LSTM layer.
        timesteps: look-back window length; default 20 matches the module's
            ``seq_length`` (previously hard-coded in ``input_shape``).
        features: number of input features per timestep.

    Returns:
        A compiled ``Sequential`` model with a single linear output.
    """
    model_lstm = Sequential()
    model_lstm.add(LSTM(neurons, input_shape=(timesteps, features), return_sequences=False))
    model_lstm.add(Dropout(d))
    model_lstm.add(Dense(1, kernel_initializer="uniform", activation='linear'))
    # NOTE(review): 'accuracy' is not meaningful for a regression ('mse')
    # objective; kept because model_score only reads evaluate()[0] (the loss).
    model_lstm.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model_lstm


def training_bus(X):
    """Train one LSTM configured by the particle *X* and evaluate it.

    ``X[0]`` -> LSTM units, ``X[1]`` -> dropout rate, ``X[2]`` -> batch size.
    Also appends ``[units, dropout, batch_size, train_mse]`` to the global
    ``model_scores`` list and the results CSV.

    Returns:
        Tuple ``(le, pred, y_t)``: number of test predictions, the test
        predictions, and the test labels as a column vector.
        BUG FIX: the original returned ``(pred, le, y_t)``, but every caller
        unpacks ``le, pred, y_t = training_bus(...)``, so the count and the
        prediction array were swapped and fed to ``function`` transposed.
    """
    neurons = int(X[0])
    dropout = round(X[1], 6)
    batch_size = int(X[2])
    model = build_model(neurons, dropout)
    print('neurons:' + str(int(X[0])) + '  dropout:' + str(dropout) + '   batch_size:' + str(batch_size))
    model.fit(
        X_train,
        y_train,
        batch_size=batch_size,
        epochs=10,
        verbose=0)
    trainScore = model_score(model, X_train, y_train)
    model_test_score(model, X_test, y_test)

    finish = [int(X[0]), dropout, batch_size, round(trainScore, 6)]
    model_scores.append(finish)
    writeOneCsv(finish, '模型参数效果比较.csv')
    pred = model.predict(X_test)
    le = len(pred)
    y_t = y_test.reshape(-1, 1)
    return le, pred, y_t


def function(ps, test, le):
    """Fitness: mean relative absolute error of predictions *ps* vs *test*.

    *le* is the number of samples; the per-sample relative errors are
    averaged by dividing each by *le* before summing.
    """
    rel_err = abs(test - ps) / test
    return sum(rel_err / le)


# (1) data size
INPUT_SIZE = 1
OUTPUT_SIZE = 1
# (2) PSO parameters
MAX_EPISODES = 20
MAX_EP_STEPS = 20
c1 = 2
c2 = 2
w = 0.5
pN = 2  # number of particles

# (3) LSTM search state
dim = 3  # search dimensionality (3 hyper-parameters to tune)
X = np.zeros((pN, dim))  # particle positions
V = np.zeros((pN, dim))  # particle velocities
pbest = np.zeros((pN, dim))  # per-particle best positions
gbest = np.zeros(dim)  # global best position
p_fit = np.zeros(pN)  # per-particle best fitness values
print(p_fit.shape)  # BUG FIX: the original printed this twice by accident

t1 = time.time()
model_scores = [['neuron0', 'Dropout', 'batch_size', 'trainScore']]

# External PSO run from the PSO_1 module; argument order:
# popsize, maxgen, dim, popmin, popmax, c1, c2, w1, vmax, vmin
my_pso = PSO(popsize=2, maxgen=100, dim=3, popmin=-50, popmax=50, c1=2, c2=2, w1=0.5, vmax=10, vmin=3)
my_pso.init_population()
fitness = my_pso.iter_optimize()
print(fitness)

# Per-dimension search bounds: [first-layer LSTM units, dropout rate, batch_size].
UP = [64, 0.14, 32]
DOWN = [32, 0.05, 16]

# (4) Run the PSO search.
for i_episode in range(MAX_EPISODES):
    # NOTE(review): re-seeding inside the episode loop replays the same
    # random sequence every episode — confirm the repetition is intentional.
    random.seed(8)
    fit = -1e5  # best (largest) fitness seen so far
    # Evaluate the initial swarm to seed the personal/global bests.
    print("计算初始全局最优")
    for i in range(pN):
        for j in range(dim):
            V[i][j] = random.uniform(0, 1)
            if j == 1:
                # Dropout (index 1) is continuous; units and batch size are integers.
                X[i][j] = random.uniform(DOWN[j], UP[j])
            else:
                X[i][j] = round(random.randint(DOWN[j], UP[j]), 0)
        pbest[i] = X[i]
        le, pred, y_t = training_bus(X[i])
        tmp = function(pred, y_t, le)
        p_fit[i] = tmp
        # NOTE(review): `function` is an error measure, yet `>` keeps the
        # *largest* value, i.e. the worst particle wins — confirm whether
        # minimisation was intended here.
        if tmp > fit:
            fit = tmp
            # BUG FIX: X[i] is a view into X, so without .copy() gbest
            # silently tracked later position updates instead of holding
            # the best position found.
            gbest = X[i].copy()
    print("初始全局最优参数：{:}".format(gbest))

    fitness = []  # best fitness recorded per search step
    for j in range(MAX_EP_STEPS):
        fit2 = []
        plt.title("第{}次迭代".format(i_episode))
        for i in range(pN):
            le, pred, y_t = training_bus(X[i])
            temp = function(pred, y_t, le)
            fit2.append(temp / 1000)
            if temp > p_fit[i]:  # update personal best
                p_fit[i] = temp
                pbest[i] = X[i]
                if p_fit[i] > fit:  # update global best
                    gbest = X[i].copy()  # BUG FIX: copy, do not alias the row
                    fit = p_fit[i]
        print("搜索步数：{:}".format(j))
        print("个体最优参数：{:}".format(pbest))
        print("全局最优参数：{:}".format(gbest))

        # Standard PSO velocity update; a move is applied only when the whole
        # proposed position stays strictly inside the [DOWN, UP] box.
        for i in range(pN):
            V[i] = w * V[i] + c1 * random.uniform(0, 1) * (pbest[i] - X[i]) + c2 * random.uniform(0, 1) * (gbest - X[i])
            ww = 1
            for k in range(dim):
                if not (DOWN[k] < X[i][k] + V[i][k] < UP[k]):
                    ww = 0
            X[i] = X[i] + V[i] * ww

        fitness.append(fit)

print('Running time: ', time.time() - t1)


def writeCsv(relate_record, src):
    """Write every row of *relate_record* to a fresh CSV file at *src*.

    Rows that fail to serialise are logged and skipped instead of aborting
    the whole dump (best-effort write).
    """
    # NOTE: uses the module-level `csv` import; the original re-imported csv
    # locally, shadowing the top-of-file import for no benefit.
    with open(src, 'w', newline='\n') as csvFile:
        writer = csv.writer(csvFile)
        for row in relate_record:
            try:
                writer.writerow(row)
            except Exception as e:
                print(e)
                print(row)


writeCsv(model_scores, 'model_scores.csv')

print(fitness)

# Plot the fitness curve of the final episode.
plt.figure(1)
plt.title("Figure1")
plt.xlabel("iterators", size=14)
plt.ylabel("fitness", size=14)
fitness = np.array(fitness)
# BUG FIX: the original used a fixed range(0, 100) x-axis, but `fitness`
# holds MAX_EP_STEPS (= 20) entries, so plt.plot raised a shape mismatch.
t = np.arange(len(fitness))
plt.plot(t, fitness, color='b', linewidth=3)
plt.show()
