from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_percentage_error
from mpl_toolkits.mplot3d import Axes3D
from keras import regularizers
import math
import os
import pickle
import keras
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import pandas as pd  # 是python的一个数据分析包
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, LSTM, Dropout, SimpleRNN
from keras import optimizers
import numpy as np
import tensorflow as tf

# Reproducibility: fix both NumPy and TensorFlow RNG seeds.
seed = 42
np.random.seed(seed)
tf.random.set_seed(seed)

# UIUC experimental polars for the NACA 2414 airfoil at four Reynolds
# numbers. (Equivalent XFLR5 datasets and NACA 2415 variants exist in
# sibling folders and can be used by pointing these paths at them.)
_DATA_FILES = [
    "NACA 2414/Re=60400/UIUC_Re60400.xlsx",
    "NACA 2414/Re=100800/UIUC_Re100800.xlsx",
    "NACA 2414/Re=201600/UIUC_Re201600.xlsx",
    "NACA 2414/Re=302700/UIUC_Re302700.xlsx",
]
_frames = [pd.read_excel(path, engine='openpyxl') for path in _DATA_FILES]
df1, df2, df3, df4 = _frames

# Inputs: column 3 (Reynolds number) and column 0 (angle of attack),
# per the Re thresholds and axis labels used further down.
x = np.concatenate([np.array(frame.iloc[:, [3, 0]]) for frame in _frames])

# Targets: columns 1 and 2 — Cl and Cd respectively, per the plot labels
# used when visualising predictions below.
y = np.concatenate([np.array(frame.iloc[:, [1, 2]]) for frame in _frames])

print(x)
print(y)


# ===========================================================


def error(y_predict, y_test):
    """Return sqrt of the summed squared errors, divided by the sample count.

    NOTE(review): this is sqrt(SSE)/n, not the usual RMSE sqrt(SSE/n) —
    kept as-is since callers may rely on the existing scale.
    """
    squared_errors = [(p - t) ** 2 for p, t in zip(y_predict, y_test)]
    return math.sqrt(sum(squared_errors)) / len(y_predict)


def error_MAE(y_predict, y_test):
    """Return the mean absolute error between predictions and targets.

    BUG FIX: the original applied ``math.sqrt`` to the summed absolute
    error (a copy-paste from ``error``), which is not MAE. MAE is simply
    the mean of the absolute deviations.
    """
    lens = len(y_predict)
    total = sum(abs(p - t) for p, t in zip(y_predict, y_test))
    return total / lens


# ---- Train/test split: every 6th sample trains, the rest is held out ----
# Interleaved 1/k splits (k = 2..5) were explored during development;
# change `step` below to reproduce any of them.
step = 6
x_train = x[::step]
y_train = y[::step]
# Concatenate the remaining 5 interleaved strides, in stride order,
# matching the original grouped ordering of the test set.
x_test_all = np.concatenate([x[k::step] for k in range(1, step)])
y_test_all = np.concatenate([y[k::step] for k in range(1, step)])
# ---- Validation split: 20% of the training set (every 5th sample) ----
# Stride-0 samples become the validation set; strides 1..4, concatenated
# in order, remain as the training set (matching the original grouping).
val_step = 5
x_valid = x_train[::val_step]
y_valid = y_train[::val_step]
x_train = np.concatenate([x_train[k::val_step] for k in range(1, val_step)])
y_train = np.concatenate([y_train[k::val_step] for k in range(1, val_step)])

# ---- Z-score standardisation ----
# NOTE(review): the scalers are fitted on the FULL dataset (train + test),
# which leaks test statistics into training — confirm this is intentional.
x_scaler = preprocessing.StandardScaler().fit(x)
print(x_scaler)
y_scaler = preprocessing.StandardScaler().fit(y)

x_train_st = x_scaler.transform(x_train)
y_train_st = y_scaler.transform(y_train)
x_test_st = x_scaler.transform(x_test_all)
y_test_st = y_scaler.transform(y_test_all)
x_valid_scaled = x_scaler.transform(x_valid)
y_valid_scaled = y_scaler.transform(y_valid)

# ---- Model: MLP with 3 hidden ReLU layers and a linear 2-unit output ----
# Inputs are (Re, alpha); outputs are the two standardised targets.
units = 13
model = Sequential([
    Dense(units, activation='relu', input_dim=2, name="layer1"),
    Dense(units, activation='relu', name="layer2"),
    Dense(units, activation='relu', name="layer3"),
    # Linear output head (no activation) — regression targets.
    Dense(2, input_dim=units),
])

# ---- Checkpoint/log directory and scaler persistence ----
# Build paths with os.sep so no literal slashes appear in the code.
run_dir = os.path.abspath('.') + os.sep + "2_dnn-bn-callbacks"
if not os.path.exists(run_dir):
    os.mkdir(run_dir)
output_model_file = run_dir + os.sep + "fashion_mnist_model.h5"
output_scale = run_dir + os.sep + "scale_model.txt"

# Persist the fitted scalers so predictions can be de-standardised later.
with open(output_scale, 'wb') as fo:
    pickle.dump([x_scaler, y_scaler], fo, fix_imports=True)

callbacks = [
    keras.callbacks.TensorBoard(run_dir),  # training curves for TensorBoard
    # Keep only the best model seen so far (by validation loss).
    keras.callbacks.ModelCheckpoint(output_model_file, save_best_only=True),
]

# Loss, optimiser and extra evaluation metrics.
model.compile(loss='mse', optimizer='nadam', metrics=['mse', 'mae'])

history = model.fit(
    x_train_st, y_train_st,
    epochs=1000,
    validation_data=(x_valid_scaled, y_valid_scaled),
    callbacks=callbacks,
    batch_size=8,
)
# ================================================================================================
# ---- Persist the trained model and evaluate it on the test set ----
# FIX: the original built this path as abspath('.') + 'best4_...h5' with no
# separator, fusing the directory name into the file name; join properly.
pkl_filename = os.path.join(os.path.abspath('.'), 'best4_pickle_model_0_10.h5')
model.save(pkl_filename)
model.save_weights("BP_model_weights.h5")

# Predict standardised outputs for the test set (batch_size=30), then
# invert the scaling to recover physical Cl/Cd values.
predict = model.predict(x_test_st, 30)
res = y_scaler.inverse_transform(predict)

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for plot text
plt.rcParams['axes.unicode_minus'] = False

# FIX: do not shadow the builtin `eval`, and label the second metric as MSE —
# this is a regression model, so the old "accuracy = …%" label was misleading.
eval_results = model.evaluate(x_test_st, y_test_st, verbose=0)
print("Evaluation on test data: loss = %0.6f mse = %0.6f \n"
      % (eval_results[0], eval_results[1]))

# ================================================================================================================
# ---- 3-D scatter: measured vs. predicted Cd over (Re, alpha) ----
fig1 = plt.figure(figsize=(7, 6))
# FIX: constructing Axes3D(fig) directly no longer attaches the axes to the
# figure in modern matplotlib (the plot renders blank); create the 3-D axes
# through the figure instead.
ax = fig1.add_subplot(projection='3d')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], y_test_all[:, 1],
           s=200, c='red', marker='.', alpha=0.5,
           label='样本点')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], res[:, 1],
           s=200, c='blue', marker='+', alpha=0.5,
           label='预测点')
ax.set_xlabel("    Re", fontsize=22)
ax.set_ylabel("   α", fontsize=22)
ax.set_zlabel(" Cd", fontsize=22)
ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.set_xlim(0, 310000)

# Tick label size.
plt.tick_params(labelsize=10)

# FIX: draw the legend BEFORE saving (it was previously added after savefig,
# so the exported image lacked it) and save to a distinct file name so the
# later Cl figure does not overwrite this one.
plt.legend(loc=0, fontsize=20)
plt.savefig('fig_cd.png', bbox_inches='tight')
plt.show()

# ---- 3-D scatter: measured vs. predicted Cl over (Re, alpha) ----
fig1 = plt.figure(figsize=(7, 6))
# FIX: Axes3D(fig) no longer attaches axes to the figure in modern
# matplotlib; create the 3-D axes through the figure instead.
ax = fig1.add_subplot(projection='3d')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], y_test_all[:, 0],
           s=200, c='red', marker='.', alpha=0.5,
           label='样本点')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], res[:, 0],
           s=200, c='blue', marker='+', alpha=0.5,
           label='预测点')
ax.set_xlabel("    Re", fontsize=22)
ax.set_ylabel("   α", fontsize=22)
ax.set_zlabel(" Cl", fontsize=22)

plt.tick_params(labelsize=10)

ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
ax.set_xlim(0, 310000)

# FIX: legend before savefig (so it appears in the exported image) and a
# distinct file name (the original 'fig.png' overwrote the Cd figure).
plt.legend(loc=0, fontsize=20)
plt.savefig('fig_cl.png', bbox_inches='tight')
plt.show()


# ===========================================================================================================
def plot_learning_curves(history):
    """Plot the loss/metric curves recorded in ``history`` by ``model.fit``."""
    plt.rcParams['font.size'] = 22
    curves = pd.DataFrame(history.history)
    curves.plot(figsize=(8, 7))
    plt.xlabel("迭代次数")
    plt.ylabel("损失函数值")
    axes = plt.gca()
    axes.set_ylim(0, 1)
    plt.show()


plot_learning_curves(history)


# Error metrics =======================================================================================================

def mape(y_true, y_pred):
    """Mean absolute percentage error, in percent.

    NOTE(review): divides by ``y_true`` element-wise, so zeros in the
    ground truth produce inf/NaN — confirm inputs are nonzero.
    """
    relative_errors = np.abs((y_pred - y_true) / y_true)
    return np.mean(relative_errors) * 100


# ---- Test-set error metrics ----
# Column 1 of y/res is Cd, column 0 is Cl (matching the plot labels above).
print("UIUC数据Cd误差：")
print("MSE of Cd:", mean_squared_error(y_test_all[:, 1], res[:, 1]))
print("R2 of Cd:", r2_score(y_test_all[:, 1], res[:, 1]))
print("*" * 100)

print("UIUC数据Cl误差：")
print("MSE of Cl:", mean_squared_error(y_test_all[:, 0], res[:, 0]))
print("R2 of Cl:", r2_score(y_test_all[:, 0], res[:, 0]))

# ---- Regroup test points by Reynolds number for the 2-D polar plots ----
# Columns of `combined`: [Re, alpha, Cl_true, Cd_true, Cl_pred, Cd_pred].
combined = np.hstack((x_test_all, y_test_all, res))
combined = combined[np.argsort(combined[:, 0])]  # sort all rows by Re

# Row counts below each Re threshold give the group boundaries in the
# Re-sorted array (four groups, one per Reynolds number).
n_below_70k = len(x_test_all[x_test_all[:, 0] < 70000])
n_below_150k = len(x_test_all[x_test_all[:, 0] < 150000])
n_below_250k = len(x_test_all[x_test_all[:, 0] < 250000])

groups = [
    combined[:n_below_70k],
    combined[n_below_70k:n_below_150k],
    combined[n_below_150k:n_below_250k],
    combined[n_below_250k:],
]

# Within each group, sort by angle of attack so line plots are monotone.
unTrans_res1, unTrans_res2, unTrans_res3, unTrans_res4 = (
    group[np.argsort(group[:, 1])] for group in groups
)

# ---- 2-D polars: Cl vs alpha, one figure per Reynolds number ----
# Columns: 1 = alpha, 2 = measured Cl, 4 = predicted Cl.
for fig_idx, (group, re_tag) in enumerate([
    (unTrans_res1, "60400"),
    (unTrans_res2, "100800"),
    (unTrans_res3, "201600"),
    (unTrans_res4, "302700"),
]):
    alpha = group[:, 1]
    if fig_idx == 0:
        # Axis labels were only set on the first figure in the original.
        plt.xlabel('α')
        plt.ylabel('Cl')
    plt.scatter(alpha, group[:, 2], c="black", s=10)
    plt.scatter(alpha, group[:, 4], c="black", s=10)
    plt.plot(alpha, group[:, 2], c="black", label="测试点 Re=" + re_tag, linewidth=1)
    plt.plot(alpha, group[:, 4], c="red", label="预测点 Re=" + re_tag, linewidth=1)
    plt.ylim(-0.75, 1.5)
    if fig_idx == 0:
        plt.legend(loc=0)  # first figure used the default legend font size
    else:
        plt.legend(loc=0, fontsize=10)
    plt.show()

# =====================================================================
# ---- 2-D polars: Cd vs alpha, one figure per Reynolds number ----
# Columns: 1 = alpha, 3 = measured Cd, 5 = predicted Cd.
for fig_idx, (group, re_tag) in enumerate([
    (unTrans_res1, "60400"),
    (unTrans_res2, "100800"),
    (unTrans_res3, "201600"),
    (unTrans_res4, "302700"),
]):
    alpha = group[:, 1]
    if fig_idx == 0:
        # Axis labels were only set on the first figure in the original.
        plt.xlabel('α')
        plt.ylabel('Cd')
    plt.scatter(alpha, group[:, 3], c="black", s=10)
    plt.scatter(alpha, group[:, 5], c="black", s=10)
    plt.plot(alpha, group[:, 3], c="black", label="测试点 Re=" + re_tag, linewidth=1)
    plt.plot(alpha, group[:, 5], c="red", label="预测点 Re=" + re_tag, linewidth=1)
    plt.ylim(0, 0.055)
    plt.legend(loc=0, fontsize=10)
    plt.show()