import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, r2_score
from mpl_toolkits.mplot3d import Axes3D

import math
import os
import pickle
import keras
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pandas as pd  # 是python的一个数据分析包
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras import optimizers
import numpy as np
import cdm as cdm



# CDM=======================================================================
# Load the CDM datasets for NACA 2414 at four Reynolds numbers.
# Inputs x: columns [4, 0] (presumably Re and alpha -- TODO confirm against
# the spreadsheets); targets y: columns [1, 2] (per the plots below:
# column 0 = Cl, column 1 = Cd).

df1 = pd.read_excel("NACA 2414/Re=60400/CDM_Re60400.xlsx")
df2 = pd.read_excel("NACA 2414/Re=100800/CDM_Re100800.xlsx")
df3 = pd.read_excel('NACA 2414/Re=201600/CDM_Re201600.xlsx')
df4 = pd.read_excel("NACA 2414/Re=302700/CDM_Re302700.xlsx")
x1 = np.array(df1.iloc[:, [4, 0]])
x2 = np.array(df2.iloc[:, [4, 0]])
x3 = np.array(df3.iloc[:, [4, 0]])
x4 = np.array(df4.iloc[:, [4, 0]])

x = np.concatenate((x1, x2, x3, x4))

y1 = np.array(df1.iloc[:, [1, 2]])
y2 = np.array(df2.iloc[:, [1, 2]])
y3 = np.array(df3.iloc[:, [1, 2]])
y4 = np.array(df4.iloc[:, [1, 2]])

y = np.concatenate((y1, y2, y3, y4))

# Interleaved split: even-indexed rows -> training, odd-indexed rows -> test.
x_train = x[0:x.shape[0]:2]
x_test_all = x[1:x.shape[0]:2]
y_train = y[0:y.shape[0]:2]
y_test_all = y[1:y.shape[0]:2]

# Shuffle x and y with ONE shared permutation so input/target pairs stay aligned.
shuffle_ix = np.random.permutation(np.arange(len(x_train)))
x_train = x_train[shuffle_ix]
y_train = y_train[shuffle_ix]

# Z-score standardization.
# BUG FIX: the scalers are fitted on the training data only and the test set
# is transformed with the *training* statistics.  The original code called
# fit_transform() on the test arrays, which re-fitted the scalers on the test
# set (data leakage) and left y_scaler holding test statistics -- corrupting
# the later y_scaler.inverse_transform() of the predictions.
x_scaler = preprocessing.StandardScaler().fit(x_train)
print(x_scaler)
y_scaler = preprocessing.StandardScaler().fit(y_train)
x_train_st = x_scaler.transform(x_train)
y_train_st = y_scaler.transform(y_train)
x_test_st = x_scaler.transform(x_test_all)
y_test_st = y_scaler.transform(y_test_all)

# UIUC======================================================================
# Load the UIUC experimental datasets at the same four Reynolds numbers.
# Inputs Tx: columns [3, 0] (presumably Re and alpha -- TODO confirm);
# targets Ty: columns [1, 2].

Tdf1 = pd.read_excel("NACA 2414/Re=60400/UIUC_Re60400.xlsx")
Tdf2 = pd.read_excel("NACA 2414/Re=100800/UIUC_Re100800.xlsx")
Tdf3 = pd.read_excel('NACA 2414/Re=201600/UIUC_Re201600.xlsx')
Tdf4 = pd.read_excel("NACA 2414/Re=302700/UIUC_Re302700.xlsx")
Tx1 = np.array(Tdf1.iloc[:, [3, 0]])
Tx2 = np.array(Tdf2.iloc[:, [3, 0]])
Tx3 = np.array(Tdf3.iloc[:, [3, 0]])
Tx4 = np.array(Tdf4.iloc[:, [3, 0]])

Tx = np.concatenate((Tx1, Tx2, Tx3, Tx4))
print(Tx)

Ty1 = np.array(Tdf1.iloc[:, [1, 2]])
Ty2 = np.array(Tdf2.iloc[:, [1, 2]])
Ty3 = np.array(Tdf3.iloc[:, [1, 2]])
Ty4 = np.array(Tdf4.iloc[:, [1, 2]])

Ty = np.concatenate((Ty1, Ty2, Ty3, Ty4))
print(Ty)

# =======================================================================
# Interleaved split: even-indexed rows -> training, odd-indexed rows -> test.
Tx_train = Tx[0:Tx.shape[0]:2]
Tx_test_all = Tx[1:Tx.shape[0]:2]
Ty_train = Ty[0:Ty.shape[0]:2]
Ty_test_all = Ty[1:Ty.shape[0]:2]

# Shuffle Tx and Ty with one shared permutation to keep pairs aligned.
Tshuffle_ix = np.random.permutation(np.arange(len(Tx_train)))
Tx_train = Tx_train[Tshuffle_ix]
Ty_train = Ty_train[Tshuffle_ix]

# Z-score standardization.
# BUG FIX: as in the CDM section, the test arrays are transformed with the
# scalers fitted on the training data (the original fit_transform() re-fitted
# the scalers on the test set -- data leakage).
Tx_scaler = preprocessing.StandardScaler().fit(Tx_train)

Ty_scaler = preprocessing.StandardScaler().fit(Ty_train)

Tx_train_st = Tx_scaler.transform(Tx_train)
Ty_train_st = Ty_scaler.transform(Ty_train)

Tx_test_st = Tx_scaler.transform(Tx_test_all)
Ty_test_st = Ty_scaler.transform(Ty_test_all)

# ===========================================================


def error(y_predict, y_test):
    """Return the root-mean-square error between predictions and targets.

    BUG FIX: the original returned ``math.sqrt(sum) / lens`` -- i.e. the
    square root of the *summed* squared errors divided by n, which is
    neither RMSE nor MSE.  RMSE takes the root of the *mean*:
    ``sqrt(sum / n)``.  Also renamed the accumulator so it no longer
    shadows the builtin ``sum``.
    """
    total = 0.0
    n = len(y_predict)
    for i in range(n):
        total += (y_predict[i] - y_test[i]) ** 2
    return math.sqrt(total / n)


def error_MAE(y_predict, y_test):
    """Return the mean absolute error between predictions and targets.

    BUG FIX: the original applied ``math.sqrt`` to the summed absolute
    errors, which is not MAE.  MAE is simply the mean of ``|pred - true|``.
    Also renamed the accumulator so it no longer shadows the builtin
    ``sum``.
    """
    total = 0.0
    n = len(y_predict)
    for i in range(n):
        total += abs(y_predict[i] - y_test[i])
    return total / n


# Load Data
# names1 = ['alpha', 'CL']
# dat1 = pd.read_csv('./NACA6409-CL/UIUC-NACA-6409.txt', skiprows=[0], names=names1, header=None, sep='\s+',
#                    usecols=[0, 1])
# print(dat1)
# x0 = np.mat(np.array(dat1)[:, 0])
# x = x0.reshape(x0.shape[0], 1)
# y0 = np.mat(np.array(dat1)[:, 1])
# y = y0.reshape(y0.shape[0], 1)

# 定义数据
## 均匀划分训练集和测试集
# x_train = x[0:x.shape[0]:2]
# x_test_all = x[1:x.shape[0]:2]
# y_train = y[0:y.shape[0]:2]
# y_test_all = y[1:y.shape[0]:2]
# x_valid = x_test_all[0:x_test_all.shape[0]:2]
# x_test = x_test_all[1:x_test_all.shape[0]:2]
# y_valid = y_test_all[0:y_test_all.shape[0]:2]
# y_test = y_test_all[1:y_test_all.shape[0]:2]

# =======================================================================


# =========================================================================

# ## 随机划分训练集和测试集
# x_train_all, x_test, y_train_all, y_test = train_test_split(x, y, test_size=0.2)
# x_train, x_valid, y_train, y_valid = train_test_split(
#     x_train_all, y_train_all, test_size=0.2)
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)



# Model definition: fully-connected regressor mapping the 2 standardized
# inputs (Re, alpha) to the 2 standardized outputs (Cl, Cd).
# 10 hidden ReLU layers of 13 units each, linear output layer.
model = Sequential()
model.add(Dense(units=13, activation='relu', input_dim=2, name="layer1"))
for _layer_idx in range(2, 11):
    model.add(Dense(units=13, activation='relu', name="layer%d" % _layer_idx))
model.add(Dense(units=2, input_dim=13))  # linear activation for regression

# NOTE(review): the original called keras.initializers.glorot_normal(seed=1)
# without assigning the result or passing it to any layer -- a no-op.  To
# actually seed the weights, pass kernel_initializer=... to each Dense layer.
# The dead call has been removed.

# Output directory for TensorBoard logs, the checkpointed model and the
# pickled scalers (os.path.join avoids hand-built separators).
logdir = os.path.join(os.path.abspath('.'), "2_dnn-bn-callbacks")
if not os.path.exists(logdir):
    os.mkdir(logdir)
output_model_file = os.path.join(logdir, "fashion_mnist_model.h5")
output_scale = os.path.join(logdir, "scale_model.txt")
# Persist the fitted scalers so predictions can be de-standardized later.
with open(output_scale, 'wb') as fo:
    pickle.dump([x_scaler, y_scaler], fo, fix_imports=True)

callbacks = [
    keras.callbacks.TensorBoard(logdir),  # write logs to logdir
    keras.callbacks.ModelCheckpoint(output_model_file,  # model file name
                                    save_best_only=True),  # keep best model only
    # keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),  # stop after 5 stagnant epochs
]
# Training configuration: MSE loss, Nadam optimizer, MSE/MAE metrics.
model.compile(loss='mean_squared_error',
              optimizer='nadam',
              metrics=['mse', 'mae'])

history = model.fit(x_train_st, y_train_st, epochs=800, validation_split=0.2, callbacks=callbacks,
                    batch_size=8)  # start training
# ================================================================================================
# Save the final model and its weights.
# BUG FIX: the original concatenated os.path.abspath('.') directly with the
# file name (no path separator), producing a mangled path such as
# ".../parentdirbest4_pickle_model_0_10.h5".
pkl_filename = os.path.join(os.path.abspath('.'), 'best4_pickle_model_0_10.h5')
model.save(pkl_filename)

model.save_weights("BP_model_weights.h5")
# Predict on the standardized UIUC test inputs, then map the outputs back
# to physical units with the target scaler.
predict = model.predict(Tx_test_st, 30)
# NOTE(review): predictions on UIUC inputs are de-standardized with the CDM
# y_scaler (the scaler used for the training targets) -- confirm this mixing
# of datasets is intended.
res = y_scaler.inverse_transform(predict)

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for plot labels
plt.rcParams['axes.unicode_minus'] = False    # render minus signs with SimHei

# Evaluate on the standardized CDM test split.
# BUG FIX: renamed `eval` (shadowed the builtin) and corrected the label --
# metrics are ['mse', 'mae'], so element [1] is the MSE, not an accuracy,
# and multiplying it by 100 as a percentage was meaningless.
eval_results = model.evaluate(x_test_st, y_test_st, verbose=0)
print("Evaluation on test data: loss = %0.6f mse = %0.6f \n"
      % (eval_results[0], eval_results[1]))
# plt.rcParams['font.size'] = 20
# X = np.linspace(0, np.array(x_test).shape[0], np.array(x_test).shape[0], endpoint=True)

# # =============================================================================================
# plt.xlabel('样本点编号')
# plt.ylabel('CA'),
# plt.scatter(x_train[:, 0], y_train, c="k", label="target_train")
# plt.plot(x_test_all[:, 0], y_test_all, c="#800080", label="target_test", linewidth=2)
# # plt.plot(x_test, res[:, 0], color="blue", linewidth=1.0, linestyle="-", label='预测值')
# # plt.plot(x_test, y_test[:, 0], color="#800080", linewidth=2.0, linestyle="--", label='原始数据值')
# plt.plot(x_test_all[:, 0], res, color="blue", linewidth=1.0, linestyle="-", label='预测值')
# # plt.plot(x_test, y_test, color="#800080", linewidth=2.0, linestyle="--", label='原始数据值')
#
# plt.legend(loc=0)
# plt.show()
# # ================================================================================================


# ================================================================================================================
# 3D scatter of Cd (target column 1) over (Re, alpha): UIUC samples vs.
# network predictions.
fig1 = plt.figure(figsize=(7, 6))
ax = Axes3D(fig1)
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], Ty_test_all[:, 1], s=200, c='red', marker='.',
           alpha=0.5,
           label='样本点')
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], res[:, 1], s=200, c='blue', marker='+', alpha=0.5,
           label='预测点')
ax.set_xlabel("    Re", fontsize=22)
ax.set_ylabel("   α", fontsize=22)
ax.set_zlabel(" Cd", fontsize=22)
ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.set_xlim(0, 310000)
# BUG FIX: the legend must exist *before* savefig, otherwise the saved PNG
# has no legend; the file also gets a Cd-specific name so the following Cl
# figure cannot overwrite it (both originally saved to 'fig.png').
plt.legend(loc=0)  # best legend location
plt.savefig('fig_cd.png', bbox_inches='tight')
plt.show()

# 3D scatter of Cl (target column 0) over (Re, alpha): UIUC samples vs.
# network predictions.
fig1 = plt.figure(figsize=(7, 6))
ax = Axes3D(fig1)
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], Ty_test_all[:, 0], s=200, c='red', marker='.',
           alpha=0.5,
           label='样本点')
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], res[:, 0], s=200, c='blue', marker='+', alpha=0.5,
           label='预测点')
ax.set_xlabel("    Re", fontsize=22)
ax.set_ylabel("   α", fontsize=22)
ax.set_zlabel(" Cl", fontsize=22)
ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.set_xlim(0, 310000)
# BUG FIX: legend drawn before savefig so it appears in the saved image,
# and the file gets a Cl-specific name instead of clobbering 'fig.png'.
plt.legend(loc=0)  # best legend location
plt.savefig('fig_cl.png', bbox_inches='tight')
plt.show()
# ============================================================================================================


# Error metrics on the UIUC test set =================================================================================
# Column 1 of the targets is Cd and column 0 is Cl (consistent with the
# z-axis labels of the plots above).
# BUG FIX: under the Cd header the original printed "MSE of Cl" / "R2 of Cl"
# although the values are computed on the Cd column; labels and variable
# names now match the quantity being measured.

print("UIUC数据Cd误差：")
mse_UIUC_Cd = mean_squared_error(Ty_test_all[:, 1], res[:, 1])
print("MSE of Cd:", mse_UIUC_Cd)
r2_UIUC_Cd = r2_score(Ty_test_all[:, 1], res[:, 1])
print("R2 of Cd", r2_UIUC_Cd)

print("*" * 100)

print("UIUC数据Cl误差：")
mse_UIUC_Cl = mean_squared_error(Ty_test_all[:, 0], res[:, 0])
print("MSE of Cl:", mse_UIUC_Cl)
r2_UIUC_Cl = r2_score(Ty_test_all[:, 0], res[:, 0])
print("R2 of Cl", r2_UIUC_Cl)
