import math
import os
import pickle

import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # 是python的一个数据分析包
import tensorflow as tf
from keras.layers import Dense
from keras.models import Sequential
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from sklearn import preprocessing

from rbflayer import RBFLayer, InitCentersRandom

#

seed = 42

# Seed both NumPy and TensorFlow so training runs are reproducible.
np.random.seed(seed)
tf.random.set_seed(seed)

# XFLR5 polar spreadsheets for the NACA 2414 airfoil at four Reynolds numbers.
_polar_files = (
    "NACA 2414/Re=60400/XFLR_Re60400.xlsx",
    "NACA 2414/Re=100800/XFLR_Re100800.xlsx",
    "NACA 2414/Re=201600/XFLR_Re201600.xlsx",
    "NACA 2414/Re=302700/XFLR_Re302700.xlsx",
)
_frames = [pd.read_excel(path, engine='openpyxl') for path in _polar_files]
df1, df2, df3, df4 = _frames

# Inputs: columns [4, 0] of each sheet, stacked over all four Reynolds numbers.
# (Column 4 appears to be Re and column 0 alpha, judging by the axis labels of
# the plots further down — TODO confirm against the spreadsheet headers.)
x = np.concatenate([np.array(df.iloc[:, [4, 0]]) for df in _frames])

# Targets: columns [5, 2] (presumably Cl and Cd per the plot z-labels — verify).
y = np.concatenate([np.array(df.iloc[:, [5, 2]]) for df in _frames])


# ===========================================================


def error(y_predict, y_test):
    """Return sqrt of the summed squared error, divided by the sample count.

    Note: this is sqrt(SSE)/n, not the conventional RMSE sqrt(SSE/n);
    the formula is kept exactly as originally written so results match.
    """
    n = len(y_predict)
    total = sum((y_predict[i] - y_test[i]) ** 2 for i in range(n))
    return math.sqrt(total) / n


def error_MAE(y_predict, y_test):
    """Return the mean absolute error between predictions and targets.

    BUG FIX: the original returned math.sqrt(sum(|e|)) / n — the sqrt was a
    copy-paste remnant from the squared-error metric above (even its comment
    was the same copied text).  MAE is simply the mean of absolute deviations.
    """
    n = len(y_predict)
    total = sum(abs(y_predict[i] - y_test[i]) for i in range(n))
    return total / n


# 1/2 split: even-indexed rows train, odd-indexed rows test ==================
x_train = x[::2]
x_test_all = x[1::2]
y_train = y[::2]
y_test_all = y[1::2]

# Half of the training rows (the odd-indexed ones) become the validation set.
x_train, x_valid = x_train[::2], x_train[1::2]
y_train, y_valid = y_train[::2], y_train[1::2]

# 20%===================================================
# x_valid = x_train[3:x_train.shape[0]:5]
# x_train1 = x_train[0:x_train.shape[0]:5]
# x_train2 = x_train[1:x_train.shape[0]:5]
# x_train3 = x_train[2:x_train.shape[0]:5]
# x_train4 = x_train[4:x_train.shape[0]:5]
# x_train5 = np.concatenate((x_train1, x_train2, x_train3, x_train4))
# x_train = x_train5
#
# y_valid = y_train[3:y_train.shape[0]:5]
# y_train1 = y_train[0:y_train.shape[0]:5]
# y_train2 = y_train[1:y_train.shape[0]:5]
# y_train3 = y_train[2:y_train.shape[0]:5]
# y_train4 = y_train[4:y_train.shape[0]:5]
# y_train5 = np.concatenate((y_train1, y_train2, y_train3, y_train4))
# y_train = y_train5

# =================================================================================

# Z-score standardization.
# BUG FIX: the original called fit_transform() on the TEST arrays with the
# already-fitted scalers, which re-fitted them on test statistics.  That both
# leaks test information into the scaling and corrupts every later use of the
# scalers (the validation transform below and the pickled scalers saved for
# inference).  Fit on the training set once, then only transform().
x_scaler = preprocessing.StandardScaler().fit(x_train)  # fitted on train only
print(x_scaler)
y_scaler = preprocessing.StandardScaler().fit(y_train)  # fitted on train only
x_train_st = x_scaler.transform(x_train)
y_train_st = y_scaler.transform(y_train)
x_test_st = x_scaler.transform(x_test_all)
y_test_st = y_scaler.transform(y_test_all)

x_valid_scaled = x_scaler.transform(x_valid)
y_valid_scaled = y_scaler.transform(y_valid)

# RBF layer with 10 centers sampled at random from the standardized inputs.
# NOTE(review): centers are drawn from the TEST set (x_test_st), which leaks
# test information into the model — x_train_st looks like the intended source;
# confirm before changing.
rbflayer = RBFLayer(10,
                    initializer=InitCentersRandom(x_test_st),
                    betas=2.0,
                    input_shape=(2,),
                    name="rbf")
# Define the model: one RBF layer feeding a linear 2-unit output head.
u = 13  # width used by the (currently disabled) dense layers below
model = Sequential()
# model.add(Dense(units=u, activation='relu', input_dim=2, name="input"))
model.add(rbflayer)
# model.add(Dense(units=u, activation='relu', name="layer2"))
# # model.add(BatchNormalization(epsilon=0.001, center=True))
# # model.add(Dropout(0.3))
# model.add(Dense(units=u, activation='relu', name="layer3"))
# model.add(Dense(units=u, activation='relu', name="layer4"))
# model.add(Dense(units=u, activation='relu', name="layer5"))
# model.add(Dense(units=u, activation='relu', name="layer6"))
# model.add(Dense(units=u, activation='relu', name="layer7"))
# model.add(Dense(units=u, activation='relu', name='layer8'))
# model.add(Dense(units=u, activation='relu', name='layer9'))
# model.add(Dense(units=u, activation='relu', name='layer10'))

# Linear (no activation) output layer producing the two regression targets.
model.add(Dense(units=2, input_dim=u, name="output"))

# keras.initializers.glorot_normal(seed=seed)

# Output directory for TensorBoard logs, checkpoints and the pickled scalers.
# Paths are built with os.path.join (portable) instead of manual os.sep
# concatenation, and makedirs(exist_ok=True) replaces the race-prone
# exists()+mkdir() pair.
logdir = os.path.join(os.path.abspath('.'), "2_dnn-bn-callbacks")
os.makedirs(logdir, exist_ok=True)
output_model_file = os.path.join(logdir, "fashion_mnist_model.h5")
output_scale = os.path.join(logdir, "scale_model.txt")
# Persist the fitted scalers so inference code can reproduce the scaling.
with open(output_scale, 'wb') as fo:
    pickle.dump([x_scaler, y_scaler], fo, fix_imports=True)
# model.compile(loss='mean_squared_error', optimizer='sgd')

callbacks = [
    keras.callbacks.TensorBoard(logdir),  # write event files into logdir
    keras.callbacks.ModelCheckpoint(output_model_file,  # checkpoint file name
                                    save_best_only=True),  # keep only the best model
    # keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),  # stop if val loss improves < 1e-3 for 5 epochs
]
# Compile bundles the loss, optimizer and evaluation metrics.
model.compile(loss='mean_squared_error',  # loss function
              optimizer='nadam',  # optimizer
              metrics=['mse', 'mae'])  # evaluation metrics

history = model.fit(x_train_st, y_train_st, epochs=2000, validation_data=(x_valid_scaled, y_valid_scaled),
                    callbacks=callbacks,
                    batch_size=8)  # start training
# ================================================================================================
##模型结果保存
# Save the trained model and its weights.
# BUG FIX: the original concatenated abspath('.') directly with the file name
# (no separator), producing a mangled sibling path such as
# ".../parentdirrbf_pickle_model_0_10.h5"; os.path.join inserts the separator.
pkl_filename = os.path.join(os.path.abspath('.'), 'rbf_pickle_model_0_10.h5')
model.save(pkl_filename)

model.save_weights("rbf_model_weights.h5")
# =====================================================================
# Predict on the standardized test inputs, then map the outputs back to the
# original target units via the inverse of the training-set standardization.
predict = model.predict(x_test_st)
res = y_scaler.inverse_transform(predict)

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for the legend labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with that font

# 3D scatter over the test set: measured vs. predicted values of the second
# target column (labeled Cd below) against (Re, alpha).
fig1 = plt.figure(figsize=(7, 6))
# FIX: bare Axes3D(fig) is deprecated since matplotlib 3.4 and no longer
# attaches the axes to the figure; add_subplot(projection='3d') is the
# supported way to create 3D axes.
ax = fig1.add_subplot(projection='3d')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], y_test_all[:, 1], s=200, c='red', marker='.',
           alpha=0.5,
           label='样本点')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], res[:, 1], s=200, c='blue', marker='+', alpha=0.5,
           label='预测点')
ax.set_xlabel("    Re", fontsize=22)
ax.set_ylabel("   α", fontsize=22)
ax.set_zlabel(" Cd", fontsize=22)

plt.tick_params(labelsize=10)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()

ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.set_xlim(0, 310000)
plt.legend(loc=0, fontsize=20)  # loc=0: let matplotlib pick the best position

plt.show()

# 3D scatter over the test set: measured vs. predicted values of the first
# target column (labeled Cl below) against (Re, alpha).
fig1 = plt.figure(figsize=(7, 6))
# FIX: bare Axes3D(fig) is deprecated since matplotlib 3.4 and no longer
# attaches the axes to the figure; add_subplot(projection='3d') is the
# supported way to create 3D axes.
ax = fig1.add_subplot(projection='3d')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], y_test_all[:, 0], s=200, c='red', marker='.',
           alpha=0.5,
           label='样本点')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], res[:, 0], s=200, c='blue', marker='+', alpha=0.5,
           label='预测点')
ax.set_xlabel("    Re", fontsize=22)
ax.set_ylabel("   α", fontsize=22)
ax.set_zlabel(" Cl", fontsize=22)

plt.tick_params(labelsize=10)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()

ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.set_xlim(0, 310000)
plt.savefig('fig.png', bbox_inches='tight')
plt.legend(loc=0, fontsize=20)  # loc=0: let matplotlib pick the best position
plt.show()


# ============================================================================================================
def plot_learning_curves(history):
    """Plot every metric series recorded in a Keras History object vs. epoch."""
    plt.rcParams['font.size'] = 22
    curves = pd.DataFrame(history.history)
    curves.plot(figsize=(8, 7))
    plt.grid(True)
    plt.xlabel("迭代次数")
    plt.ylabel("损失函数值")
    plt.gca().set_ylim(0, 1)
    plt.show()


plot_learning_curves(history)
