import math
import os
import pickle
import keras
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pandas as pd  # 是python的一个数据分析包
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
import numpy as np


def error(y_predict, y_test):
    """Return the root-mean-square error (RMSE) between predictions and targets.

    The original returned math.sqrt(sum)/len, which is neither MSE nor RMSE;
    RMSE is sqrt(mean(squared error)).  Returns 0.0 for empty input.

    Parameters: y_predict, y_test -- equal-length sequences of numbers.
    """
    n = len(y_predict)
    if n == 0:
        return 0.0
    total = 0.0  # accumulate squared errors (avoid shadowing builtin `sum`)
    for p, t in zip(y_predict, y_test):
        total += (p - t) ** 2
    return math.sqrt(total / n)


def error_MAE(y_predict, y_test):
    """Return the mean absolute error (MAE) between predictions and targets.

    The original applied math.sqrt to the absolute-error sum -- a copy-paste
    leftover from the RMSE helper; MAE is simply mean(|error|).
    Returns 0.0 for empty input.

    Parameters: y_predict, y_test -- equal-length sequences of numbers.
    """
    n = len(y_predict)
    if n == 0:
        return 0.0
    total = 0.0  # accumulate absolute errors (avoid shadowing builtin `sum`)
    for p, t in zip(y_predict, y_test):
        total += abs(p - t)
    return total / n


# Load Data: two whitespace-separated columns (angle of attack `alpha` and
# lift coefficient `CL`); the first line of the file is skipped as a header.
# NOTE: sep must be a raw string -- '\s+' is an invalid escape (SyntaxWarning
# on Python 3.12+).
names1 = ['alpha', 'CL']
dat1 = pd.read_csv('./NACA6409-CL/UIUC-NACA-6409.txt', skiprows=[0], names=names1, header=None, sep=r'\s+',
                   usecols=[0, 1])
print(dat1)
# np.mat() of a 1-D column produces a (1, n) matrix, so the original
# x0.reshape(x0.shape[0], 1) requested a (1, 1) shape and raised ValueError
# for any file with more than one row.  Build plain (n, 1) column arrays.
x = np.asarray(dat1)[:, 0].reshape(-1, 1)
y = np.asarray(dat1)[:, 1].reshape(-1, 1)

# Deterministic interleaved split: even-indexed samples become the training
# set; odd-indexed samples are split again into validation (even half) and
# test (odd half).
x_train = x[::2]
x_test_all = x[1::2]
y_train = y[::2]
y_test_all = y[1::2]
x_valid = x_test_all[::2]
x_test = x_test_all[1::2]
y_valid = y_test_all[::2]
y_test = y_test_all[1::2]
# Alternative random split, kept for reference:
# x_train_all, x_test, y_train_all, y_test = train_test_split(x, y, test_size=0.2)
# x_train, x_valid, y_train, y_valid = train_test_split(
#     x_train_all, y_train_all, test_size=0.2)
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# Z-score standardisation.  Fit the scalers on the TRAINING data only, then
# apply the same transform everywhere: the original called fit_transform on
# the test set, which refits the scalers on test statistics (data leakage)
# and desynchronises the y_scaler later used for inverse_transform.
x_scaler = preprocessing.StandardScaler().fit(x_train)
y_scaler = preprocessing.StandardScaler().fit(y_train)
x_train_st = x_scaler.transform(x_train)
y_train_st = y_scaler.transform(y_train)
x_test_st = x_scaler.transform(x_test)   # was fit_transform: refit-on-test bug
y_test_st = y_scaler.transform(y_test)   # was fit_transform: refit-on-test bug

x_valid_scaled = x_scaler.transform(x_valid)
y_valid_scaled = y_scaler.transform(y_valid)

# Model definition: a small fully-connected regression network.
# The data prepared above has ONE input feature (alpha) and ONE target (CL),
# but the original declared input_dim=2 and a 3-unit output layer, neither
# of which matches the (n, 1) training arrays and would fail at fit() time.
model = Sequential()
model.add(Dense(units=13, activation='relu', input_dim=1))  # one feature: alpha
model.add(Dense(units=13, activation='relu'))
model.add(Dense(units=13, activation='relu'))
model.add(Dense(units=13, activation='relu'))
model.add(Dense(units=1))  # linear output: one regression target (CL)

# Build output paths with os.path.join instead of manual os.sep concatenation.
logdir = os.path.join(os.path.abspath('.'), "2_dnn-bn-callbacks")
os.makedirs(logdir, exist_ok=True)  # race-free replacement for exists()+mkdir()
output_model_file = os.path.join(logdir, "fashion_mnist_model.h5")
output_scale = os.path.join(logdir, "scale_model.txt")
# Persist the fitted scalers so predictions can be de-standardised later.
with open(output_scale, 'wb') as fo:
    pickle.dump([x_scaler, y_scaler], fo, fix_imports=True)
# model.compile(loss='mean_squared_error', optimizer='sgd')

# Training callbacks: write TensorBoard event files under `logdir` and keep
# a copy of the best model seen so far at `output_model_file`.
tensorboard_cb = keras.callbacks.TensorBoard(logdir)
checkpoint_cb = keras.callbacks.ModelCheckpoint(output_model_file,
                                                save_best_only=True)
callbacks = [
    tensorboard_cb,
    checkpoint_cb,
    # keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3) could be added
    # to stop after 5 consecutive epochs with improvement below 1e-3.
]
# compile() binds loss, optimizer and reported metrics to the model.
model.compile(loss='mean_squared_error',
              optimizer='nadam',
              metrics=['mse', 'mae'])

# Train for 300 epochs in mini-batches of 10, validating every epoch.
history = model.fit(x_train_st, y_train_st, epochs=300, callbacks=callbacks,
                    validation_data=(x_valid_scaled, y_valid_scaled), batch_size=10)

## Save the final model.  The original concatenated abspath('.') directly
## with the filename (no separator), producing a mangled sibling path like
## ".../projectbest4_pickle_model_0_10.h5" -- join the components properly.
pkl_filename = os.path.join(os.path.abspath('.'), 'best4_pickle_model_0_10.h5')
model.save(pkl_filename)

# Predict on the standardised test inputs (the bare positional `30` was
# batch_size -- pass it by keyword for clarity), then map the standardised
# outputs back to physical CL units.
predict = model.predict(x_test_st, batch_size=30)
res = y_scaler.inverse_transform(predict)
# Configure matplotlib for CJK glyphs and a proper minus sign.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# 4. Evaluate on the standardised test set.  The original bound the result
# to `eval`, shadowing the builtin, and printed metrics[1] as an "accuracy"
# percentage -- per compile() that metric is the MSE, so label it as such.
test_metrics = model.evaluate(x_test_st, y_test_st, verbose=0)
print("Evaluation on test data: loss = %0.6f mse = %0.6f \n"
      % (test_metrics[0], test_metrics[1]))
plt.rcParams['font.size'] = 20
# One x-axis position per test sample.
n_test = np.array(x_test).shape[0]
X = np.linspace(0, n_test, n_test, endpoint=True)

plt.xlabel('样本点编号')
plt.ylabel('CA')  # original had a stray trailing comma (harmless tuple expr)
plt.plot(X, res[:, 0], color="blue", linewidth=1.0, linestyle="-", label='预测值')
plt.plot(X, y_test[:, 0], color="#800080", linewidth=2.0, linestyle="--", label='原始数据值')
plt.legend(loc=0)
plt.show()
# Scalar error summaries between de-standardised predictions and raw targets.
t1 = res[:, 0]
t2 = y_test[:, 0]
err1 = error(t1, t2)
err2 = error_MAE(t1, t2)
print("CA MSE误差：" + str(err1))
print("CA MAE误差：" + str(err2))
