import math
import os
import pickle

import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # 是python的一个数据分析包
import tensorflow as tf
from keras.layers import Dense
from keras.models import Sequential
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
from mpl_toolkits.mplot3d import Axes3D
from sklearn import preprocessing
from sklearn.metrics import r2_score
from tensorflow.python.keras.layers import LSTM, SimpleRNN

import AS_MFS_ALL
from custom_loss import mmd_loss

# Fix RNG seeds for reproducibility of the NumPy splits and the
# TensorFlow weight initialization / training.
seed = 42
np.random.seed(seed)
tf.random.set_seed(seed)
#
# df1 = pd.read_excel("NACA 2414/Re=60400/XFLR_Re60400.xlsx")
# df2 = pd.read_excel("NACA 2414/Re=100800/XFLR_Re100800.xlsx")
# df3 = pd.read_excel('NACA 2414/Re=201600/XFLR_Re201600.xlsx')
# df4 = pd.read_excel("NACA 2414/Re=302700/XFLR_Re302700.xlsx")
# x1 = np.array(df1.iloc[:, [4, 0]])
# x2 = np.array(df2.iloc[:, [4, 0]])
# x3 = np.array(df3.iloc[:, [4, 0]])
# x4 = np.array(df4.iloc[:, [4, 0]])

# UIUC wind-tunnel data ======================================================
# Load measured NACA 2414 polars at four Reynolds numbers and stack them
# into one dataset.  Inputs x are column pairs [3, 0] and targets y are
# column pairs [4, 2] of each sheet -- presumably (Re, alpha) and
# (Cl/Cd, Cd) judging by the later plot labels; TODO confirm against the
# spreadsheet layout.

df1 = pd.read_excel("NACA 2414/Re=60400/UIUC_CS_Re60400.xlsx", engine='openpyxl')
df2 = pd.read_excel("NACA 2414/Re=100800/UIUC_CS_Re100800.xlsx", engine='openpyxl')
df3 = pd.read_excel('NACA 2414/Re=201600/UIUC_CS_Re201600.xlsx', engine='openpyxl')
df4 = pd.read_excel("NACA 2414/Re=302700/UIUC_CS_Re302700.xlsx", engine='openpyxl')

# Alternative dataset (NACA 2415, UIUC measurements), kept for switching:
# df1 = pd.read_excel("NACA 2415/Re=60000/UIUC_Re60000.xlsx", engine='openpyxl')
# df2 = pd.read_excel("NACA 2415/Re=101300/UIUC_Re101300.xlsx", engine='openpyxl')
# df3 = pd.read_excel('NACA 2415/Re=201900/UIUC_Re201900.xlsx', engine='openpyxl')
# df4 = pd.read_excel("NACA 2415/Re=303100/UIUC_Re303100.xlsx", engine='openpyxl')

# Alternative dataset (NACA 2415, XFLR5-computed polars):
# df1 = pd.read_excel("NACA 2415/Re=60000/XFLR_Re60000.xlsx", engine='openpyxl')
# df2 = pd.read_excel("NACA 2415/Re=101300/XFLR_Re101300.xlsx", engine='openpyxl')
# df3 = pd.read_excel('NACA 2415/Re=201900/XFLR_Re201900.xlsx', engine='openpyxl')
# df4 = pd.read_excel("NACA 2415/Re=303100/XFLR_Re303100.xlsx", engine='openpyxl')

# Input features: columns [3, 0] of each sheet.
x1 = np.array(df1.iloc[:, [3, 0]])
x2 = np.array(df2.iloc[:, [3, 0]])
x3 = np.array(df3.iloc[:, [3, 0]])
x4 = np.array(df4.iloc[:, [3, 0]])

# x1 = np.array(df1.iloc[:, [4, 0]])
# x2 = np.array(df2.iloc[:, [4, 0]])
# x3 = np.array(df3.iloc[:, [4, 0]])
# x4 = np.array(df4.iloc[:, [4, 0]])

# Stack the four Reynolds-number conditions into one input matrix.
x = np.concatenate((x1, x2, x3, x4))
# print(x)
# x = x[0:x.shape[0]:2]
# x = x4

# Targets: columns [4, 2] of each sheet.
y1 = np.array(df1.iloc[:, [4, 2]])
y2 = np.array(df2.iloc[:, [4, 2]])
y3 = np.array(df3.iloc[:, [4, 2]])
y4 = np.array(df4.iloc[:, [4, 2]])

# y1 = np.array(df1.iloc[:, 1])[:, np.newaxis]
# y2 = np.array(df2.iloc[:, 1)[:, np.newaxis]
# y3 = np.array(df3.iloc[:, 1)[:, np.newaxis]
# y4 = np.array(df4.iloc[:, 1)[:, np.newaxis]

y = np.concatenate((y1, y2, y3, y4))
# y = y[0:y.shape[0]:2]

# Cross-condition prediction (extrapolation) variant =====================
# x_train = np.concatenate((x1, x2, x4))
# x_test_all = x3
# y_train = np.concatenate((y1, y2, y4))
# y_test_all = y3

# y = y4
print(x)
print(y)


# ========================================================================


def error(y_predict, y_test):
    """Return sqrt(sum of squared errors) divided by the sample count.

    Note: this is sqrt(SSE)/n, which equals RMSE/sqrt(n) -- it is neither
    the MSE nor the RMSE, even though callers later label it "MSE".
    Assumes len(y_test) >= len(y_predict).
    """
    n = len(y_predict)
    total = 0
    k = 0
    while k < n:
        total += (y_predict[k] - y_test[k]) ** 2
        k += 1
    return math.sqrt(total) / n


def error_MAE(y_predict, y_test):
    """Return the mean absolute error between predictions and targets.

    MAE = (1/n) * sum(|y_predict[i] - y_test[i]|).

    Bug fix: the original returned math.sqrt(sum|e|)/n, i.e. it applied a
    square root to the summed absolute errors, which is not the MAE the
    function name promises.
    """
    n = len(y_predict)
    total = 0.0
    for i in range(n):
        total += abs(y_predict[i] - y_test[i])
    return total / n


# x_train = np.concatenate((x1, x3, x4))
# y_train = np.concatenate((y1, y3, y4))
# x_test_all = x2
# y_test_all = y2

# Load Data
# names1 = ['alpha', 'CL']
# dat1 = pd.read_csv('./NACA6409-CL/UIUC-NACA-6409.txt', skiprows=[0], names=names1, header=None, sep='\s+',
#                    usecols=[0, 1])
# print(dat1)
# x0 = np.mat(np.array(dat1)[:, 0])
# x = x0.reshape(x0.shape[0], 1)
# y0 = np.mat(np.array(dat1)[:, 1])
# y = y0.reshape(y0.shape[0], 1)

# 定义数据
## 均匀划分训练集和测试集


# 二分之一训练=======================================================================
# x_train = x[0:x.shape[0]:2]
# x_test_all = x[1:x.shape[0]:2]
# y_train = y[0:y.shape[0]:2]
# y_test_all = y[1:y.shape[0]:2]

#  三分之一训练====================================================================
# x_train = x[0:x.shape[0]:3]
# # shuffle_ix = np.random.permutation(np.arange(len(x_train)))
# # x_train = x_train[shuffle_ix]
# X1 = x[1:x.shape[0]:3]
# X2 = x[2:x.shape[0]:3]
# X3 = np.concatenate((X1, X2))
# x_test_all = X3
#
# y_train = y[0:y.shape[0]:3]
# # y_train = y_train[shuffle_ix]
# Y1 = y[1:y.shape[0]:3]
# Y2 = y[2:y.shape[0]:3]
# Y3 = np.concatenate((Y1, Y2))
# y_test_all = Y3

# Use 1/4 of the samples for training ========================================
# Every 4th row (offset 0) trains; the three remaining interleaved
# subsets (offsets 1, 2, 3) are held out as the test set.
x_train = x[::4]
x_test_all = np.concatenate([x[offset::4] for offset in (1, 2, 3)])

y_train = y[::4]
y_test_all = np.concatenate([y[offset::4] for offset in (1, 2, 3)])

# ================================================================

# 1/5训练=============================================================
# x_train = x[0:x.shape[0]:5]
# X1 = x[1:x.shape[0]:5]
# X2 = x[2:x.shape[0]:5]
# X3 = x[3:x.shape[0]:5]
# X4 = x[4:x.shape[0]:5]
# X5 = np.concatenate((X1, X2, X3, X4))
# x_test_all = X5
#
# y_train = y[0:y.shape[0]:5]
#
# Y1 = y[1:y.shape[0]:5]
# Y2 = y[2:y.shape[0]:5]
# Y3 = y[3:y.shape[0]:5]
# Y4 = y[4:y.shape[0]:5]
# Y5 = np.concatenate((Y1, Y2, Y3, Y4))
# y_test_all = Y5

# 1/6=================================
# x_train = x[3:x.shape[0]:6]
# X1 = x[0:x.shape[0]:6]
# X2 = x[1:x.shape[0]:6]
# X3 = x[2:x.shape[0]:6]
# X4 = x[4:x.shape[0]:6]
# X5 = x[5:x.shape[0]:6]
# x_test_all = np.concatenate((X1, X2, X3, X4, X5))
#
# y_train = y[3:y.shape[0]:6]
#
# Y1 = y[0:y.shape[0]:6]
# Y2 = y[1:y.shape[0]:6]
# Y3 = y[2:y.shape[0]:6]
# Y4 = y[4:y.shape[0]:6]
# Y5 = y[5:y.shape[0]:6]
# y_test_all = np.concatenate((Y1, Y2, Y3, Y4, Y5))

# 1/7================================================================
# x_train = x[3:x.shape[0]:7]
# X1 = x[0:x.shape[0]:7]
# X2 = x[1:x.shape[0]:7]
# X3 = x[2:x.shape[0]:7]
# X4 = x[4:x.shape[0]:7]
# X5 = x[5:x.shape[0]:7]
# X6 = x[6:x.shape[0]:7]
# x_test_all = np.concatenate((X1, X2, X3, X4, X5, X6))
#
# y_train = y[3:y.shape[0]:7]
# Y1 = y[0:y.shape[0]:7]
# Y2 = y[1:y.shape[0]:7]
# Y3 = y[2:y.shape[0]:7]
# Y4 = y[4:y.shape[0]:7]
# Y5 = y[5:y.shape[0]:7]
# Y6 = y[6:y.shape[0]:7]
# y_test_all = np.concatenate((Y1, Y2, Y3, Y4, Y5, Y6))

# 1/8================================================================
# x_train = x[0:x.shape[0]:8]
# X1 = x[1:x.shape[0]:8]
# X2 = x[2:x.shape[0]:8]
# X3 = x[3:x.shape[0]:8]
# X4 = x[4:x.shape[0]:8]
# X5 = x[5:x.shape[0]:8]
# X6 = x[6:x.shape[0]:8]
# X7 = x[7:x.shape[0]:8]
# x_test_all = np.concatenate((X1, X2, X3, X4, X5, X6, X7))
#
# y_train = y[0:y.shape[0]:8]
# Y1 = y[1:y.shape[0]:8]
# Y2 = y[2:y.shape[0]:8]
# Y3 = y[3:y.shape[0]:8]
# Y4 = y[4:y.shape[0]:8]
# Y5 = y[5:y.shape[0]:8]
# Y6 = y[6:y.shape[0]:8]
# Y7 = y[7:y.shape[0]:8]
# y_test_all = np.concatenate((Y1, Y2, Y3, Y4, Y5, Y6, Y7))

# 1/9=========================================================
# x_train = x[4:x.shape[0]:9]
# X1 = x[0:x.shape[0]:9]
# X2 = x[1:x.shape[0]:9]
# X3 = x[2:x.shape[0]:9]
# X4 = x[3:x.shape[0]:9]
# X5 = x[5:x.shape[0]:9]
# X6 = x[6:x.shape[0]:9]
# X7 = x[7:x.shape[0]:9]
# X8 = x[8:x.shape[0]:9]
# x_test_all = np.concatenate((X1, X2, X3, X4, X5, X6, X7, X8))
#
# y_train = y[4:y.shape[0]:9]
# Y1 = y[0:y.shape[0]:9]
# Y2 = y[1:y.shape[0]:9]
# Y3 = y[2:y.shape[0]:9]
# Y4 = y[3:y.shape[0]:9]
# Y5 = y[5:y.shape[0]:9]
# Y6 = y[6:y.shape[0]:9]
# Y7 = y[7:y.shape[0]:9]
# Y8 = y[8:y.shape[0]:9]
# y_test_all = np.concatenate((Y1, Y2, Y3, Y4, Y5, Y6, Y7, Y8))

# 1/10=============================================================
# x_train = x[0:x.shape[0]:10]
# X1 = x[1:x.shape[0]:10]
# X2 = x[2:x.shape[0]:10]
# X3 = x[3:x.shape[0]:10]
# X4 = x[4:x.shape[0]:10]
# X5 = x[5:x.shape[0]:10]
# X6 = x[6:x.shape[0]:10]
# X7 = x[7:x.shape[0]:10]
# X8 = x[8:x.shape[0]:10]
# X9 = x[9:x.shape[0]:10]
# x_test_all = np.concatenate((X1, X2, X3, X4, X5, X6, X7, X8, X9))
#
# y_train = y[0:y.shape[0]:10]
# Y1 = y[1:y.shape[0]:10]
# Y2 = y[2:y.shape[0]:10]
# Y3 = y[3:y.shape[0]:10]
# Y4 = y[4:y.shape[0]:10]
# Y5 = y[5:y.shape[0]:10]
# Y6 = y[6:y.shape[0]:10]
# Y7 = y[7:y.shape[0]:10]
# Y8 = y[8:y.shape[0]:10]
# Y9 = y[9:y.shape[0]:10]
# y_test_all = np.concatenate((Y1, Y2, Y3, Y4, Y5, Y6, Y7, Y8, Y9))

# =================================================================

# x_valid = x_test_all[0:x_test_all.shape[0]:2]
# x_test = x_test_all[1:x_test_all.shape[0]:2]
# y_valid = y_test_all[0:y_test_all.shape[0]:2]
# y_test = y_test_all[1:y_test_all.shape[0]:2]

# =========================================================================


# ## 随机划分训练集和测试集
# x_train_all, x_test, y_train_all, y_test = train_test_split(x, y, test_size=0.2)
# x_train, x_valid, y_train, y_valid = train_test_split(
#     x_train_all, y_train_all, test_size=0.2)
# x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)

# 50%===================================
# x_valid = x_train[1:x_train.shape[0]:2]
# x_train = x_train[0:x_train.shape[0]:2]
#
# y_valid = y_train[1:y_train.shape[0]:2]
# y_train = y_train[0:y_train.shape[0]:2]

# Remember the full (pre-validation-split) training set for later plots.
XTrain = x_train
YTrain = y_train

# Carve 20% of the training set out as validation ============================
# Every 5th sample (offset 3) validates; the other four offsets train.
x_valid = x_train[3::5]
x_train = np.concatenate([x_train[i::5] for i in (0, 1, 2, 4)])

y_valid = y_train[3::5]
y_train = np.concatenate([y_train[i::5] for i in (0, 1, 2, 4)])

# 25%
# x_valid = x_train[2:x_train.shape[0]:4]
# x_train1 = x_train[0:x_train.shape[0]:4]
# x_train2 = x_train[1:x_train.shape[0]:4]
# x_train3 = x_train[3:x_train.shape[0]:4]
# x_train5 = np.concatenate((x_train1, x_train2, x_train3))
# x_train = x_train5
#
# y_valid = y_train[2:y_train.shape[0]:4]
# y_train1 = y_train[0:y_train.shape[0]:4]
# y_train2 = y_train[1:y_train.shape[0]:4]
# y_train3 = y_train[3:y_train.shape[0]:4]
# y_train5 = np.concatenate((y_train1, y_train2, y_train3))
# y_train = y_train5

# 30%
# x_valid = x_train[1:x_train.shape[0]:3]
# x_train1 = x_train[0:x_train.shape[0]:3]
# x_train2 = x_train[2:x_train.shape[0]:3]
# x_train5 = np.concatenate((x_train1, x_train2))
# x_train = x_train5
#
# y_valid = y_train[1:y_train.shape[0]:3]
# y_train1 = y_train[0:y_train.shape[0]:3]
# y_train2 = y_train[2:y_train.shape[0]:3]
# y_train5 = np.concatenate((y_train1, y_train2))
# y_train = y_train5
# =====================================================

# Z-score standardization.
# NOTE(review): both scalers are fit on the FULL dataset x / y (train AND
# test), which leaks test-set statistics into preprocessing -- consider
# fitting on x_train / y_train only.  TODO confirm this is intentional.
x_scaler = preprocessing.StandardScaler().fit(x)  # scaler for inputs (Re, alpha)
y_scaler = preprocessing.StandardScaler().fit(y)  # scaler for targets
x_train_st = x_scaler.transform(x_train)  # standardized training inputs
y_train_st = y_scaler.transform(y_train)  # standardized training targets
# x_test_st = x_scaler.transform(x_test)  # StandardScaler标准化处理
# y_test_st = y_scaler.transform(y_test)  # StandardScaler标准化处理
x_test_st = x_scaler.transform(x_test_all)  # standardized test inputs
y_test_st = y_scaler.transform(y_test_all)  # standardized test targets

x_valid_scaled = x_scaler.transform(x_valid)
y_valid_scaled = y_scaler.transform(y_valid)

# Reshape inputs to (samples, timesteps=1, features) as the LSTM expects.
x_train_st = x_train_st.reshape((x_train_st.shape[0], 1, x_train_st.shape[1]))
x_test_st = x_test_st.reshape((x_test_st.shape[0], 1, x_test_st.shape[1]))
x_valid_scaled = x_valid_scaled.reshape((x_valid_scaled.shape[0], 1, x_valid_scaled.shape[1]))

# 预测大量数据============================================================
# 输入：

#
# inX1 = np.linspace(x1[0], x1[20], 100)
# inX2 = np.linspace(x2[0], x2[20], 100)
# inX3 = np.linspace(x3[0], x3[20], 100)
# inX4 = np.linspace(x4[0], x4[20], 100)
# inX = np.concatenate((inX1, inX2, inX3, inX4))
#
# inX1_st = x_scaler.transform(inX1)
# inX2_st = x_scaler.transform(inX2)
# inX3_st = x_scaler.transform(inX3)
# inX4_st = x_scaler.transform(inX4)
#
# inX1_st = inX1_st.reshape((inX1_st.shape[0], 1, inX1_st.shape[1]))
# inX2_st = inX2_st.reshape((inX2_st.shape[0], 1, inX2_st.shape[1]))
# inX3_st = inX3_st.reshape((inX3_st.shape[0], 1, inX3_st.shape[1]))
# inX4_st = inX4_st.reshape((inX4_st.shape[0], 1, inX4_st.shape[1]))

# Target-domain data =========================================================
# Aliases for the transfer-learning stage.  In this configuration the
# "target domain" reuses the same arrays as the source domain.
# NOTE(review): Tx / Ty below are assigned y / x respectively (swapped
# relative to every other T* alias) and are never used later -- looks like
# a leftover; verify before relying on them.
Tx = y
Ty = x

Tx_train = x_train
Tx_test_all = x_test_all

Ty_train = y_train
Ty_test_all = y_test_all

# Validation set aliases.
Tx_valid = x_valid
Ty_valid = y_valid

# Z-score scalers (shared with the source domain).
Tx_scaler = x_scaler  # input scaler

Ty_scaler = y_scaler  # target scaler

Tx_train_st = x_train_st  # standardized training inputs
Ty_train_st = y_train_st  # standardized training targets

Tx_test_st = x_test_st  # standardized test inputs
Ty_test_st = y_test_st  # standardized test targets

Tx_valid_scaled = x_valid_scaled
Ty_valid_scaled = y_valid_scaled

# =============================================================
# Model definition ===========================================================
# Source-domain model trained from scratch ("UnTransfer"):
# LSTM(64) -> Dense(13) -> Dense(13) -> Dense(2).
units = 13  # width of the hidden Dense layers
model = Sequential()
# model.add(Dense(units=64, activation='relu', input_dim=2, name="layer1"))
model.add(LSTM(units=64, input_shape=(x_train_st.shape[1], x_train_st.shape[2]), activation="relu", name="LSTM"))
# model.add(SimpleRNN(units=64, input_shape=(x_train_st.shape[1], x_train_st.shape[2]), activation="relu", name="LSTM"))
model.add(Dense(units=units, activation='relu', name="layer2"))
# model.add(BatchNormalization(epsilon=0.001, center=True))
# model.add(Dropout(0.5))
model.add(Dense(units=units, activation='relu', name="layer3"))
# model.add(Dense(units=units, activation='relu', name="layer4"))
# model.add(Dense(units=units, activation='relu', name="layer5"))
# model.add(Dense(units=units, activation='relu', name="layer6"))
# model.add(Dense(units=units, activation='relu', name="layer7"))
# model.add(Dense(units=units, activation='relu', name='layer8'))
# model.add(Dense(units=units, activation='relu', name='layer9'))
# model.add(Dense(units=units, activation='relu', name='layer10'))

# Linear output head for the two targets.
# NOTE(review): input_dim on a non-first layer is ignored by Keras.
model.add(Dense(units=2, input_dim=units))

# keras.initializers.glorot_normal(seed=None)

# TensorBoard / checkpoint directory for the source-domain run
# (path built with os.sep so no hard-coded slashes appear).
logdir = os.path.abspath('.') + os.sep + "2_dnn-bn-callbacks"
if not os.path.exists(logdir):
    os.mkdir(logdir)
# NOTE(review): "fashion_mnist_model.h5" looks like a leftover name from a
# tutorial -- confirm before renaming, other scripts may load it.
output_model_file = os.path.abspath('.') + os.sep + "2_dnn-bn-callbacks" + os.sep + "fashion_mnist_model.h5"
output_scale = os.path.abspath('.') + os.sep + "2_dnn-bn-callbacks" + os.sep + "scale_model.txt"
# Persist the fitted scalers so predictions can be de-standardized later.
with open(output_scale, 'wb') as fo:
    pickle.dump([x_scaler, y_scaler], fo, fix_imports=True)
# model.compile(loss='mean_squared_error', optimizer='sgd')

callbacks = [
    keras.callbacks.TensorBoard(logdir),  # log training curves to logdir
    keras.callbacks.ModelCheckpoint(output_model_file,  # checkpoint file
                                    save_best_only=True),  # keep only the best model
    # keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),  # stop after 5 stagnant epochs
]
# Training configuration: compile bundles loss and optimizer.
model.compile(loss='mse',  # loss function
              optimizer='nadam',  # optimizer
              metrics=['mse', 'mae'])  # evaluation metrics

history = model.fit(x_train_st, y_train_st, epochs=1000, validation_data=(x_valid_scaled, y_valid_scaled),
                    callbacks=callbacks,
                    batch_size=8)  # start training
# ================================================================================================
# Save the trained model and its weights.
# NOTE(review): abspath('.') is concatenated with the file name with no
# os.sep, so this writes e.g. ".../<cwd>best4_pickle_model_0_10.h5" in the
# parent directory -- likely a missing separator; confirm before changing.
pkl_filename = os.path.abspath('.') + 'best4_pickle_model_0_10.h5'
model.save(pkl_filename)

model.save_weights("UnTransfer_BP_model_weights.h5")
# predict = model.predict(x_test_st, 30)
# =====================================================================
# Second positional argument of predict() is batch_size.
predict = model.predict(x_test_st, 30)

# x_test_st = x_test_st.reshape((x_test_st.shape[0], x_test_st.shape[2]))
# 大量预测数据=====================================================
# outY1 = model.predict(inX1_st)
# outY2 = model.predict(inX2_st)
# outY3 = model.predict(inX3_st)
# outY4 = model.predict(inX4_st)
#
# outY1 = y_scaler.inverse_transform(outY1)
# outY2 = y_scaler.inverse_transform(outY2)
# outY3 = y_scaler.inverse_transform(outY3)
# outY4 = y_scaler.inverse_transform(outY4)

# ==============================================================


# =====================================================================
# Map standardized predictions back to physical units.
res = y_scaler.inverse_transform(predict)

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for plot labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with SimHei
# 4. 评估模型
# eval = model.evaluate(x_test_st, y_test_st, verbose=0)
# =====================================================================
# eval = model.evaluate(x_test_st, y_test_st, verbose=0)
# # =====================================================================
# print("Evaluation on test data: loss = %0.6f accuracy = %0.2f%% \n" \
#       % (eval[0], eval[1] * 100))
# plt.rcParams['font.size'] = 20
# X = np.linspace(0, np.array(x_test).shape[0], np.array(x_test).shape[0], endpoint=True)

# # =============================================================================================
# plt.xlabel('样本点编号')
# plt.ylabel('CA'),
# plt.scatter(x_train[:, 0], y_train, c="k", label="target_train")
# plt.plot(x_test_all[:, 0], y_test_all, c="#800080", label="target_test", linewidth=2)
# # plt.plot(x_test, res[:, 0], color="blue", linewidth=1.0, linestyle="-", label='预测值')
# # plt.plot(x_test, y_test[:, 0], color="#800080", linewidth=2.0, linestyle="--", label='原始数据值')
# plt.plot(x_test_all[:, 0], res, color="blue", linewidth=1.0, linestyle="-", label='预测值')
# # plt.plot(x_test, y_test, color="#800080", linewidth=2.0, linestyle="--", label='原始数据值')
#
# plt.legend(loc=0)
# plt.show()
# # ================================================================================================
# NOTE(review): the active split above trains on 1/4 of the data, yet this
# title claims 1/8 -- update the string when switching split ratios.
title = "1/8风洞数据用于训练"
# ================================================================================================================
# 3D scatter: Cd over (Re, alpha) -- train points, test points, LSTM predictions.
figCd_UnTransfer = plt.figure()
# NOTE(review): constructing Axes3D(fig) directly is deprecated in newer
# matplotlib; fig.add_subplot(projection='3d') is the modern form.
ax = Axes3D(figCd_UnTransfer)
ax.scatter(np.array(XTrain)[:, 0], np.array(XTrain)[:, 1], YTrain[:, 1], s=50, c='black', marker='.',
           alpha=0.5,
           label='训练点(风洞)')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], y_test_all[:, 1], s=200, c='red', marker='.',
           alpha=0.5,
           label='测试点(风洞)')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], res[:, 1], s=150, c='blue', marker='+', alpha=0.5,
           label='预测点(LSTM)')
# ax.set_title(title)
ax.set_xlabel("    Re", fontsize=15)
ax.set_ylabel("   α/(°)", fontsize=15)
ax.set_zlabel(" Cd", fontsize=15)
ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax.zaxis.set_major_ax.set_xlim(0, 310000)locator(MultipleLocator(0.03))
# ax.set_zticklabels(ax.get_zticklabels(),ha='right')
ax.set_xlim(0, 310000)
plt.title(title, fontsize=15)
# Axis tick label size.
plt.tick_params(labelsize=10)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()
# ax.set_ylim(-16.00, 15.00)

# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# ax.set_zticks(fontsize=20)
ax.view_init(azim=-34, elev=28)
# plt.tight_layout()
# Legend.
plt.legend(loc=0, fontsize=10, frameon=False)  #
figCd_UnTransfer.savefig('figCd_UnTransfer.svg', bbox_inches='tight')
plt.show()

# =========================================================================================
# 3D scatter: Cl/Cd over (Re, alpha) -- train points, test points, LSTM predictions.
figCl_Cd_UnTransfer = plt.figure()
ax = Axes3D(figCl_Cd_UnTransfer)
ax.scatter(np.array(XTrain)[:, 0], np.array(XTrain)[:, 1], YTrain[:, 0], s=50, c='black', marker='.',
           alpha=0.5,
           label='训练点(风洞)')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], y_test_all[:, 0], s=200, c='red', marker='.',
           alpha=0.5,
           label='测试点(风洞)')
ax.scatter(np.array(x_test_all)[:, 0], np.array(x_test_all)[:, 1], res[:, 0], s=150, c='blue', marker='+', alpha=0.5,
           label='预测点(LSTM)')
# ax.set_title(title)
ax.set_xlabel("    Re", fontsize=15)
ax.set_ylabel("   α/(°)", fontsize=15)
ax.set_zlabel(" Cl/Cd", fontsize=15)

plt.tick_params(labelsize=10)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()

ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax.zaxis.set_major_locator(MultipleLocator(0.03))
# ax.set_zticklabels(ax.get_zticklabels(),ha='right')
ax.set_xlim(0, 310000)
plt.title(title, fontsize=15)
# ax.set_ylim(-16.00, 15.00)
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# ax.set_zticks(fontsize=20)
ax.view_init(azim=-154, elev=28)
# plt.tight_layout()
plt.legend(loc=0, fontsize=10, frameon=False)  # legend location chosen automatically
figCl_Cd_UnTransfer.savefig('figCl_Cd_UnTransfer.svg', bbox_inches='tight')
plt.show()


# # ============================================================================================================
def plot_learning_curves(history):
    """Plot the training/validation metrics recorded in a Keras History object.

    The y-axis is clipped to [0, 1] to keep early large losses from
    dominating the view.
    """
    plt.rcParams['font.size'] = 22
    curves = pd.DataFrame(history.history)
    curves.plot(figsize=(8, 7))
    plt.xlabel("迭代次数")
    plt.ylabel("损失函数值")
    plt.gca().set_ylim(0, 1)
    plt.show()


# Visualize the source-model (no-transfer) training history.
plot_learning_curves(history)

# Model definition ======================================================
# Transfer stage: same architecture as the source model, initialized from
# previously trained weights.
model = Sequential()
# model.add(Dense(units=64, activation='relu', input_dim=2, name="input"))
model.add(LSTM(units=64, input_shape=(x_train_st.shape[1], x_train_st.shape[2]), activation="relu", name="LSTM"))
# model.add(SimpleRNN(units=64, input_shape=(x_train_st.shape[1], x_train_st.shape[2]), activation="relu", name="LSTM"))
model.add(Dense(units=units, activation='relu', name="layer2"))
# model.add(BatchNormalization(epsilon=0.001, center=True))
# model.add(Dropout(0.3))
model.add(Dense(units=units, activation='relu', name="layer3"))
# model.add(Dense(units=units, activation='relu', name="4"))
# model.add(Dense(units=u, activation='relu', name="layer5"))
# model.add(Dense(units=u, activation='relu', name="layer6"))
# model.add(Dense(units=u, activation='relu', name="layer7"))
# model.add(Dense(units=u, activation='relu', name='layer8'))
# model.add(Dense(units=u, activation='relu', name='layer9'))
# model.add(Dense(units=u, activation='relu', name='layer10'))

model.add(Dense(units=2, input_dim=units, name="out"))

# ==========================================================================================
# Load source-domain weights by layer name.
# NOTE(review): this script saved "UnTransfer_BP_model_weights.h5" above,
# but loads "BP_model_weights.h5" here -- presumably produced by a separate
# source-domain script; confirm the intended file.
model.load_weights("BP_model_weights.h5", by_name=True)

# ==============================================================================================
# Freeze the first two layers (LSTM + layer2) so only the head is retrained.
# The original nested loop (`for layer in model.layers: for layer in
# model.layers[:2]: ...`) re-froze the same two layers once per model layer;
# the net effect is identical, but a single pass is the intent.
for layer in model.layers[:2]:
    layer.trainable = False
# ==================================================================================================


# TensorBoard / checkpoint directory for the transfer run
# (path built with os.sep so no hard-coded slashes appear).
logdir = os.path.abspath('.') + os.sep + "3_dnn-bn-callbacks"
if not os.path.exists(logdir):
    os.mkdir(logdir)
output_model_file = os.path.abspath('.') + os.sep + "3_dnn-bn-callbacks" + os.sep + "fashion_mnist_model.h5"
output_scale = os.path.abspath('.') + os.sep + "3_dnn-bn-callbacks" + os.sep + "scale_model.txt"
# Persist the target-domain scalers alongside the checkpoints.
with open(output_scale, 'wb') as fo:
    pickle.dump([Tx_scaler, Ty_scaler], fo, fix_imports=True)
# model.compile(loss='mean_squared_error', optimizer='sgd')

callbacks = [
    keras.callbacks.TensorBoard(logdir),  # log training curves to logdir
    keras.callbacks.ModelCheckpoint(output_model_file,  # checkpoint file
                                    save_best_only=True),  # keep only the best model
    # keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),  # stop after 5 stagnant epochs
]
# Training configuration: compile bundles loss and optimizer.
model.compile(loss='mse',  # loss function
              optimizer='nadam',  # optimizer
              metrics=['mse', 'mae'])  # evaluation metrics

history = model.fit(Tx_train_st, Ty_train_st, epochs=1000, validation_data=(Tx_valid_scaled, Ty_valid_scaled),
                    callbacks=callbacks,
                    batch_size=8)  # start training
# ================================================================================================
# Save model and weights after the frozen-backbone transfer stage.
# NOTE(review): same missing-os.sep path issue as the earlier save.
pkl_filename = os.path.abspath('.') + 'best4_pickle_model_0_10.h5'
model.save(pkl_filename)

model.save_weights("Transfer_model_weights_transfer.h5")

#  Fine-tuning ===================================================================
# Rebuild the same architecture, reload the transfer-stage weights, and
# retrain all layers (nothing is frozen here).
model = Sequential()
# model.add(Dense(units=64, activation='relu', input_dim=2, name="input"))
model.add(LSTM(units=64, input_shape=(x_train_st.shape[1], x_train_st.shape[2]), activation="relu", name="LSTM"))
# model.add(SimpleRNN(units=64, input_shape=(x_train_st.shape[1], x_train_st.shape[2]), activation="relu", name="LSTM"))
model.add(Dense(units=units, activation='relu', name="layer2"))
# model.add(BatchNormalization(epsilon=0.001, center=True))
# model.add(Dropout(0.3))
model.add(Dense(units=units, activation='relu', name="layer3"))
# model.add(Dense(units=units, activation='relu', name="4"))
# model.add(Dense(units=u, activation='relu', name="layer5"))
# model.add(Dense(units=u, activation='relu', name="layer6"))
# model.add(Dense(units=u, activation='relu', name="layer7"))
# model.add(Dense(units=u, activation='relu', name='layer8'))
# model.add(Dense(units=u, activation='relu', name='layer9'))
# model.add(Dense(units=u, activation='relu', name='layer10'))

model.add(Dense(units=2, input_dim=units, name="out"))

# ==========================================================================================
model.load_weights("Transfer_model_weights_transfer.h5", by_name=True)

# =============================================================================================
# for layer in model.layers:
#     for layer in model.layers[1:3]:
#         layer.trainable = True

# ==================================================================================================


# Reuse the transfer-stage log/checkpoint directory.
logdir = os.path.abspath('.') + os.sep + "3_dnn-bn-callbacks"
if not os.path.exists(logdir):
    os.mkdir(logdir)
output_model_file = os.path.abspath('.') + os.sep + "3_dnn-bn-callbacks" + os.sep + "fashion_mnist_model.h5"
output_scale = os.path.abspath('.') + os.sep + "3_dnn-bn-callbacks" + os.sep + "scale_model.txt"
with open(output_scale, 'wb') as fo:
    pickle.dump([Tx_scaler, Ty_scaler], fo, fix_imports=True)
# model.compile(loss='mean_squared_error', optimizer='sgd')

callbacks = [
    keras.callbacks.TensorBoard(logdir),  # log training curves to logdir
    keras.callbacks.ModelCheckpoint(output_model_file,  # checkpoint file
                                    save_best_only=True),  # keep only the best model
    # keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),  # stop after 5 stagnant epochs
]
# Training configuration: compile bundles loss and optimizer.
# NOTE(review): the original comment called 1e-3 a "very low learning
# rate", but 1e-3 is Keras's default for Nadam -- confirm the intended LR.
model.compile(loss='mse',  # loss function
              optimizer=keras.optimizers.Nadam(1e-3),  # optimizer
              metrics=['mse', 'mae'])  # evaluation metrics

history = model.fit(Tx_train_st, Ty_train_st, epochs=1000, validation_data=(Tx_valid_scaled, Ty_valid_scaled),
                    callbacks=callbacks,
                    batch_size=8)  # start training

# End of fine-tuning ==============================================================================


# Predict on the standardized test inputs (second positional arg is batch_size).
Tpredict = model.predict(Tx_test_st, 30)

# 大量预测数据=====================================================
# ToutY1 = model.predict(inX1_st)
# ToutY2 = model.predict(inX2_st)
# ToutY3 = model.predict(inX3_st)
# ToutY4 = model.predict(inX4_st)
#
# ToutY1 = y_scaler.inverse_transform(ToutY1)
# ToutY2 = y_scaler.inverse_transform(ToutY2)
# ToutY3 = y_scaler.inverse_transform(ToutY3)
# ToutY4 = y_scaler.inverse_transform(ToutY4)
# =====================================================================
# Map standardized transfer-model predictions back to physical units.
Tres = Ty_scaler.inverse_transform(Tpredict)

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for plot labels
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with SimHei
# 4. 评估模型

# =====================================================================
# Teval = model.evaluate(Tx_test_st, Ty_test_st, verbose=0)
# # =====================================================================
# print("Evaluation on test data: loss = %0.6f accuracy = %0.2f%% \n" \
#       % (Teval[0], Teval[1] * 100))

# ================================================================================================================

# 3D scatter: Cd over (Re, alpha) -- train points, test points, transfer predictions.
figCd_Transfer = plt.figure()
ax = Axes3D(figCd_Transfer)
ax.scatter(np.array(XTrain)[:, 0], np.array(XTrain)[:, 1], YTrain[:, 1], s=50, c='black', marker='.',
           alpha=0.5,
           label='训练点(风洞)')
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], Ty_test_all[:, 1], s=200, c='red', marker='.',
           alpha=0.5,
           label='测试点(风洞)')
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], Tres[:, 1], s=150, c='blue', marker='+', alpha=0.5,
           label='预测点(迁移)')

# ax.set_title(title)
ax.set_xlabel("    Re", fontsize=15)
ax.set_ylabel("   α/(°)", fontsize=15)
ax.set_zlabel(" Cd", fontsize=15)

ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax.zaxis.set_major_locator(MultipleLocator(0.03))
# ax.set_zticklabels(ax.get_zticklabels(),ha='right')
ax.set_xlim(0, 310000)
plt.title(title, fontsize=15)
# Axis tick label size.
plt.tick_params(labelsize=10)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()

# ax.set_ylim(-16.00, 15.00)
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# ax.set_zticks(fontsize=20)
ax.view_init(azim=-34, elev=28)
# plt.tight_layout()
plt.legend(loc=0, fontsize=10, frameon=False)  # legend location chosen automatically
figCd_Transfer.savefig('figCd_Transfer.svg', bbox_inches='tight')
plt.show()

# =============================================================================================
# 3D scatter: Cl/Cd over (Re, alpha) -- train points, test points, transfer predictions.
figCl_Cd_Transfer = plt.figure()
ax = Axes3D(figCl_Cd_Transfer)
ax.scatter(np.array(XTrain)[:, 0], np.array(XTrain)[:, 1], YTrain[:, 0], s=50, c='black', marker='.',
           alpha=0.5,
           label='训练点(风洞)')
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], Ty_test_all[:, 0], s=200, c='red', marker='.',
           alpha=0.5,
           label='测试点(风洞)')
ax.scatter(np.array(Tx_test_all)[:, 0], np.array(Tx_test_all)[:, 1], Tres[:, 0], s=150, c='blue', marker='+', alpha=0.5,
           label='预测点(迁移)')
# ax.set_title(title)
ax.set_xlabel("    Re", size=15)
ax.set_ylabel("   α/(°)", fontsize=15)
ax.set_zlabel(" Cl/Cd", size=15)

plt.tick_params(labelsize=10)
labels = ax.get_xticklabels() + ax.get_yticklabels() + ax.get_zticklabels()

ax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
# ax.zaxis.set_major_locator(MultipleLocator(0.03))
# ax.set_zticklabels(ax.get_zticklabels(),ha='right')
ax.set_xlim(0, 310000)
plt.title(title, fontsize=15)
# ax.set_ylim(-16.00, 15.00)
# plt.xticks(fontsize=20)
# plt.yticks(fontsize=20)
# ax.set_zticks(fontsize=20)
ax.view_init(azim=-154, elev=28)
# plt.tight_layout()
plt.legend(loc=0, fontsize=10, frameon=False)  # legend location chosen automatically
figCl_Cd_Transfer.savefig('figCl_Cd_Transfer.svg', bbox_inches='tight')
plt.show()


def plot_learning_curves(history):
    """Plot the recorded metrics for the fine-tuned model, with a grid.

    Redefines the earlier plot_learning_curves; this variant leaves the
    global font size unchanged and enables grid lines.
    """
    curves = pd.DataFrame(history.history)
    axis = curves.plot(figsize=(8, 7))
    axis.grid(True)
    axis.set_xlabel("迭代次数")
    axis.set_ylabel("损失函数值")
    axis.set_ylim(0, 1)
    plt.show()


# Visualize the fine-tuning training history.
plot_learning_curves(history)


# 误差计算==============================================================================================================

def mape(y_true, y_pred):
    """Mean absolute percentage error, in percent.

    Assumes y_true contains no zeros (division by the true values).
    """
    relative_errors = np.abs((y_pred - y_true) / y_true)
    return np.mean(relative_errors) * 100


def error(y_predict, y_test):
    """Return sqrt(sum of squared errors) divided by the number of samples.

    Parameters
    ----------
    y_predict, y_test : array-like of equal length
        Predicted and reference values.

    Raises
    ------
    ZeroDivisionError
        If the inputs are empty.

    NOTE(review): call sites print this value labelled "MSE", but the
    formula is sqrt(SSE)/n — neither MSE nor RMSE (RMSE would be
    sqrt(SSE/n)). The original formula is preserved here; confirm which
    metric was intended.
    """
    y_predict = np.asarray(y_predict, dtype=float)
    y_test = np.asarray(y_test, dtype=float)
    # Vectorized replacement of the original accumulator loop, which also
    # shadowed the builtin `sum`.
    sse = np.sum((y_predict - y_test) ** 2)
    return math.sqrt(sse) / len(y_predict)


# Error report: plain LSTM (no transfer) on the UIUC test data ---------------
# NOTE(review): values printed as "MSE" come from error(), which computes
# sqrt(SSE)/n rather than a textbook MSE — confirm intent.
print("UIUC数据Cl/Cd误差（UnTransfer）：")
mse_XFLR_Cl = error(y_test_all[:, 0], res[:, 0])
print("MSE of Cl/Cd:", mse_XFLR_Cl)
r2_XFLR_Cl = r2_score(y_test_all[:, 0], res[:, 0])
print("R2 of Cl/Cd:", r2_XFLR_Cl)
# print("MAPE of cl:", mape(y_test_all[:, 0], res[:, 0]))
print("*" * 100)
print("UIUC数据Cd误差（UnTransfer）：")
mse_XFLR_Cd = error(y_test_all[:, 1], res[:, 1])
print("MSE of Cd:", mse_XFLR_Cd)
r2_XFLR_Cd = r2_score(y_test_all[:, 1], res[:, 1])
print("R2 of Cd:", r2_XFLR_Cd)
# print("MAPE of cd:", mape(y_test_all[:, 1], res[:, 1]))
print("*" * 100)

# Transfer-learning model on the same UIUC test data ==========================
print("UIUC数据Cl/Cd误差（Transfer）：")
mse_UIUC_Cl = error(Ty_test_all[:, 0], Tres[:, 0])
print("MSE of Cl/Cd:", mse_UIUC_Cl)
r2_UIUC_Cl = r2_score(Ty_test_all[:, 0], Tres[:, 0])
print("R2 of Cl/Cd", r2_UIUC_Cl)
print("*" * 100)
print("UIUC数据Cd误差（Transfer）：")
mse_UIUC_Cd = error(Ty_test_all[:, 1], Tres[:, 1])
print("MSE of Cd:", mse_UIUC_Cd)
r2_UIUC_Cd = r2_score(Ty_test_all[:, 1], Tres[:, 1])
print("R2 of Cd", r2_UIUC_Cd)
# print("MAPE of cd",mean_absolute_percentage_error(Ty_test_all[:, 1], Tres[:, 1])*100)

print("*" * 100)

# print("MAPE of cl",mean_absolute_percentage_error(Ty_test_all[:, 0], Tres[:, 0])*100)

# 二维图==============================
# unTransfer排序=============================================================================
# unTrans_data = np.hstack((x_test_all, y_test_all, res))
# unTrans_sorted_indices = np.argsort(unTrans_data[:, 0])  # 排序规则
# unTrans_data = unTrans_data[unTrans_sorted_indices]
#
# uh1 = len(x_test_all[x_test_all[:, 0] < 70000])
# uh2 = len(x_test_all[x_test_all[:, 0] < 150000])
# uh3 = len(x_test_all[x_test_all[:, 0] < 250000])
#
# unTrans_res1 = np.array(unTrans_data)[:uh1, :]
# unTrans_res2 = np.array(unTrans_data)[uh1:uh2, :]
# unTrans_res3 = np.array(unTrans_data)[uh2:uh3, :]
# unTrans_res4 = np.array(unTrans_data)[uh3:, :]
#
# # 分别根据攻角排序
# unTrans_sorted_indices1 = np.argsort(unTrans_res1[:, 1])
# unTrans_sorted_indices2 = np.argsort(unTrans_res2[:, 1])
# unTrans_sorted_indices3 = np.argsort(unTrans_res3[:, 1])
# unTrans_sorted_indices4 = np.argsort(unTrans_res4[:, 1])
#
# unTrans_res1 = unTrans_res1[unTrans_sorted_indices1]
# unTrans_res2 = unTrans_res2[unTrans_sorted_indices2]
# unTrans_res3 = unTrans_res3[unTrans_sorted_indices3]
# unTrans_res4 = unTrans_res4[unTrans_sorted_indices4]
#
# # Transfer排序=======================================================================================
# all_data = np.hstack((Tx_test_all, Ty_test_all, Tres))  # 合并数据集
# sorted_indices = np.argsort(all_data[:, 0])  # 排序规则
# all_data = all_data[sorted_indices]  # 排序

# Tx_test_all, Ty_test_all, Tres = np.array_split(all_data, 3, axis=1)  # 水平分割

# 按雷诺数分割、排序

# h1 = len(Tx_test_all[Tx_test_all[:, 0] < 70000])  # Re < 7000数据有多少
# h2 = len(Tx_test_all[Tx_test_all[:, 0] < 150000])
# h3 = len(Tx_test_all[Tx_test_all[:, 0] < 250000])
#
# res1 = np.array(all_data)[:h1, :]
# res2 = np.array(all_data)[h1:h2, :]
# res3 = np.array(all_data)[h2:h3, :]
# res4 = np.array(all_data)[h3:, :]
#
# # 分别根据攻角排序
# sorted_indices1 = np.argsort(res1[:, 1])
# sorted_indices2 = np.argsort(res2[:, 1])
# sorted_indices3 = np.argsort(res3[:, 1])
# sorted_indices4 = np.argsort(res4[:, 1])
#
# res1 = res1[sorted_indices1]
# res2 = res2[sorted_indices2]
# res3 = res3[sorted_indices3]
# res4 = res4[sorted_indices4]
#
# # 训练集
# # 合并数据集
#
# all_train = np.hstack((x_train, y_train))  # 合并数据集
#
# train_h1 = len(all_train[all_train[:, 0] < 70000])
# train_h2 = len(all_train[all_train[:, 0] < 150000])
# train_h3 = len(all_train[all_train[:, 0] < 250000])
#
# x1_train = np.array(all_train)[:train_h1, :]
# x2_train = np.array(all_train)[train_h1:train_h2, :]
# x3_train = np.array(all_train)[train_h2:train_h3, :]
# x4_train = np.array(all_train)[train_h3:, :]

#
# # 画图
# # Cl/Cd==================================================================
# labelsize = 15
# figCl_Cd1 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# # plt.scatter(x1_train[:, 1], x1_train[:, 2], c="green", s=10, label="训练点 Re=60400")
# plt.scatter(unTrans_res1[:, 1], unTrans_res1[:, 2], c="black", s=12, label="样本点 Re=60400")
# plt.plot(unTrans_res1[:, 1], unTrans_res1[:, 4], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=60400",
#          linewidth=1)
# plt.plot(res1[:, 1], res1[:, 4], c="red", marker='s', markersize=5, markerfacecolor='white',
#          label="预测点(Transfer) Re=60400", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(-30, 80)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCl_Cd1.savefig("Cl_Cd1.svg")
#
# figCl_Cd2 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# # plt.scatter(x2_train[:, 1], x2_train[:, 2], c="green", s=10, label="训练点 Re=100800")
# plt.scatter(unTrans_res2[:, 1], unTrans_res2[:, 2], c="black", s=12, label="样本点 Re=100800")
# plt.plot(unTrans_res2[:, 1], unTrans_res2[:, 4], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=100800",
#          linewidth=1)
# plt.plot(res2[:, 1], res2[:, 4], c="red", marker='s', markersize=5, markerfacecolor='white',
#          label="预测点(Transfer) Re=100800", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(-30, 80)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCl_Cd2.savefig("Cl_Cd2.svg")
#
# figCl_Cd3 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# # plt.scatter(x3_train[:, 1], x3_train[:, 2], c="green", s=10, label="训练点 Re=201600")
# plt.scatter(unTrans_res3[:, 1], unTrans_res3[:, 2], c="black", s=12, label="样本点 Re=201600")
# plt.plot(unTrans_res3[:, 1], unTrans_res3[:, 4], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=201600",
#          linewidth=1)
# plt.plot(res3[:, 1], res3[:, 4], marker='s', markersize=5, markerfacecolor='white', c="red",
#          label="预测点(Transfer) Re=201600", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(-30, 80)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCl_Cd3.savefig("Cl_Cd3.svg")
#
# figCl_Cd4 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# # plt.scatter(x4_train[:, 1], x4_train[:, 2], c="green", s=10, label="训练点 Re=302700")
# plt.scatter(unTrans_res4[:, 1], unTrans_res4[:, 2], c="black", s=12, label="样本点 Re=302700")
# plt.plot(unTrans_res4[:, 1], unTrans_res4[:, 4], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=302700",
#          linewidth=1)
# plt.plot(res4[:, 1], res4[:, 4], marker='s', markersize=5, markerfacecolor='white', c="red",
#          label="预测点(Transfer) Re=302700", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(-30, 80)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCl_Cd4.savefig("Cl_Cd4.svg")
# # Cd=====================================================================
# figCd1 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# # plt.scatter(x1_train[:, 1], x1_train[:, 3], c="green", s=10, label="训练点 Re=60400")
# plt.scatter(unTrans_res1[:, 1], unTrans_res1[:, 3], c="black", s=12, label="样本点 Re=60400")
# plt.plot(unTrans_res1[:, 1], unTrans_res1[:, 5], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=60400",
#          linewidth=1)
# plt.plot(res1[:, 1], res1[:, 5], marker='s', markersize=5, markerfacecolor='white', c="red",
#          label="预测点(Transfer) Re=60400", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(0, 0.055)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCd1.savefig("Cd1.svg")
#
# figCd2 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# # plt.scatter(x2_train[:, 1], x2_train[:, 3], c="green", s=10, label="训练点 Re=100800")
# plt.scatter(unTrans_res2[:, 1], unTrans_res2[:, 3], c="black", s=12, label="样本点 Re=100800")
# plt.plot(unTrans_res2[:, 1], unTrans_res2[:, 5], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=100800",
#          linewidth=1)
# plt.plot(res2[:, 1], res2[:, 5], marker='s', markersize=5, markerfacecolor='white', c="red",
#          label="预测点(Transfer) Re=100800", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(0, 0.055)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCd2.savefig("Cd2.svg")
#
# figCd3 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# # plt.scatter(x3_train[:, 1], x3_train[:, 3], c="green", s=10, label="训练点 Re=201600")
# plt.scatter(unTrans_res3[:, 1], unTrans_res3[:, 3], c="black", s=12, label="样本点 Re=201600")
# plt.plot(unTrans_res3[:, 1], unTrans_res3[:, 5], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=201600",
#          linewidth=1.5)
# plt.plot(res3[:, 1], res3[:, 5], marker='s', markersize=5, markerfacecolor='white', c="red",
#          label="预测点(Transfer) Re=201600", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(0, 0.055)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCd3.savefig("Cd3.svg")
#
# figCd4 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# # plt.scatter(x4_train[:, 1], x4_train[:, 3], c="green", s=10, label="训练点 Re=302700")
# plt.scatter(unTrans_res4[:, 1], unTrans_res4[:, 3], c="black", s=12, label="样本点 Re=302700")
# plt.plot(unTrans_res4[:, 1], unTrans_res4[:, 5], marker='^', markersize=5, markerfacecolor='white', c="blue",
#          label="预测点(UnTransfer) Re=302700",
#          linewidth=1)
# plt.plot(res4[:, 1], res4[:, 5], marker='s', markersize=5, markerfacecolor='white', c="red",
#          label="预测点(Transfer) Re=302700", linewidth=1)
# plt.xlim(-7, 12)
# plt.ylim(0, 0.055)
# plt.legend(loc=0, fontsize=10)  # 图例位置自
# plt.show()
# figCd4.savefig("Cd4.svg")

# # 画图(大量）
# # Cl/Cd==================================================================
# labelsize = 15
#
# figCl_Cd1 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# plt.scatter(unTrans_res1[:, 1], unTrans_res1[:, 2], c="black", s=10, label="样本点 Re=60400")
# plt.plot(inX1[:, 1], outY1[:, 0], c="blue", label="预测点(UnTransfer) Re=60400", linewidth=1.5)
# plt.plot(inX1[:, 1], ToutY1[:, 0], c="red", label="预测点(Transfer) Re=60400", linewidth=1.5)
# plt.ylim(-30, 80)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCl_Cd1.savefig("Cl_Cd1.svg", dpi=1200)
#
# figCl_Cd2 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# plt.scatter(unTrans_res2[:, 1], unTrans_res2[:, 2], c="black", s=10, label="样本点 Re=100800")
# plt.plot(inX2[:, 1], outY2[:, 0], c="blue", label="预测点(UnTransfer) Re=100800", linewidth=1.5)
# plt.plot(inX2[:, 1], ToutY2[:, 0], c="red", label="预测点(Transfer) Re=100800", linewidth=1.5)
# plt.ylim(-30, 80)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCl_Cd2.savefig("Cl_Cd2.svg", dpi=1200)
#
# figCl_Cd3 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# plt.scatter(unTrans_res3[:, 1], unTrans_res3[:, 2], c="black", s=10, label="样本点 Re=201600")
# plt.plot(inX3[:, 1], outY3[:, 0], c="blue", label="预测点(UnTransfer) Re=201600", linewidth=1.5)
# plt.plot(inX3[:, 1], ToutY3[:, 0], c="red", label="预测点(Transfer) Re=201600", linewidth=1.5)
# plt.ylim(-30, 80)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCl_Cd3.savefig("Cl_Cd3.svg", dpi=1200)
#
# figCl_Cd4 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cl/Cd')
# plt.scatter(unTrans_res4[:, 1], unTrans_res4[:, 2], c="black", s=10, label="样本点 Re=302700")
# plt.plot(inX4[:, 1], outY4[:, 0], c="blue", label="预测点(UnTransfer) Re=302700", linewidth=1.5)
# plt.plot(inX4[:, 1], ToutY4[:, 0], c="red", label="预测点(Transfer) Re=302700", linewidth=1.5)
# plt.ylim(-30, 80)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCl_Cd4.savefig("Cl_Cd4.svg", dpi=1200)
#
# # Cd=====================================================================
#
#
# figCd1 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# plt.scatter(unTrans_res1[:, 1], unTrans_res1[:, 3], c="black", s=10, label="样本点 Re=60400")
# plt.plot(inX1[:, 1], outY1[:, 1], c="blue", label="预测点(UnTransfer) Re=60400", linewidth=1.5)
# plt.plot(inX1[:, 1], ToutY1[:, 1], c="red", label="预测点(Transfer) Re=60400", linewidth=1.5)
# plt.ylim(0, 0.055)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCd1.savefig("Cd1.svg", dpi=1200)
#
# figCd2 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# plt.scatter(unTrans_res2[:, 1], unTrans_res2[:, 3], c="black", s=10, label="样本点 Re=100800")
# plt.plot(inX2[:, 1], outY2[:, 1], c="blue", label="预测点(UnTransfer) Re=100800", linewidth=1.5)
# plt.plot(inX2[:, 1], ToutY2[:, 1], c="red", label="预测点(Transfer) Re=100800", linewidth=1.5)
# plt.ylim(0, 0.055)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCd2.savefig("Cd2.svg", dpi=1200)
#
# figCd3 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# plt.scatter(unTrans_res3[:, 1], unTrans_res3[:, 3], c="black", s=10, label="样本点 Re=201600")
# plt.plot(inX3[:, 1], outY3[:, 1], c="blue", label="预测点(UnTransfer) Re=201600", linewidth=1.5)
# plt.plot(inX3[:, 1], ToutY3[:, 1], c="red", label="预测点(Transfer) Re=201600", linewidth=1.5)
# plt.ylim(0, 0.055)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCd3.savefig("Cd3.svg", dpi=1200)
#
# figCd4 = plt.figure(figsize=(8, 6))
# plt.tick_params(labelsize=labelsize)
# plt.xlabel('α/(°)')
# plt.ylabel('Cd')
# plt.scatter(unTrans_res4[:, 1], unTrans_res4[:, 3], c="black", s=10, label="样本点 Re=302700")
# plt.plot(inX4[:, 1], outY4[:, 1], c="blue", label="预测点(UnTransfer) Re=302700", linewidth=1.5)
# plt.plot(inX4[:, 1], ToutY4[:, 1], c="red", label="预测点(Transfer) Re=302700", linewidth=1.5)
# plt.ylim(0, 0.055)
# plt.xlim(-7, 12)
# plt.legend(loc=2, fontsize=15)  # 图例位置自
# plt.show()
# figCd4.savefig("Cd4.svg", dpi=1200)

# Per-sample absolute-error bar charts
# x-axis: sample index
# Cd error


# Run the external AS-MFS baseline pipeline (see AS_MFS_ALL module).
AF_model = AS_MFS_ALL.AN_MFS()
AF_model.source_data_init()
AF_model.target_data_init()
AF_model.source_model_fit()
AF_model.train_model_result()
AF_model.different()
AF_model.d_model_fit()
AF_model.final_result()
# AS-MFS prediction = source-model output + learned discrepancy correction.
y_pre = AF_model.source_model.predict(AF_model.x_target_test_st) + AF_model.d_model.predict(AF_model.x_target_test_st)



# Per-sample absolute errors of the transfer model.
# NOTE(review): Tres is compared against y_test_all here, while the metrics
# above compared it with Ty_test_all — confirm the two test sets are aligned.
Y1 = abs(Tres[:, 0] - y_test_all[:, 0])
Y2 = abs(Tres[:, 1] - y_test_all[:, 1])

# Plain (un-transferred) LSTM absolute errors.
UY1 = abs(res[:, 0] - y_test_all[:, 0])
UY2 = abs(res[:, 1] - y_test_all[:, 1])

# AS-MFS absolute errors.
# NOTE(review): columns are swapped here (y_pre[:, 1] vs target column 0) —
# presumably AS-MFS outputs the columns in the opposite order; verify
# against AS_MFS_ALL before trusting these bars.
AY1 = abs(y_pre[:, 1] - y_test_all[:, 0])
AY2 = abs(y_pre[:, 0] - y_test_all[:, 1])


labelsize = 15

# One x position per test sample for the grouped bar charts below.
X = np.linspace(0, np.array(y_test_all).shape[0], np.array(y_test_all).shape[0], endpoint=True)

# Bar width for the three side-by-side series.
width = 0.2

# Cl/Cd absolute-error comparison =============================================
figCl_Cd_error, ax = plt.subplots(1, 1, figsize=(7, 5))
plt.title('绝对值误差对比', fontsize=15)
plt.tick_params(labelsize=labelsize)
plt.xlabel('样本编号', fontsize=15)
plt.ylabel('Cl/Cd 绝对值误差', fontsize=15)
# plt.ylim(0, 30)
# Grouped bars per sample: plain LSTM vs transfer vs AS-MFS.
ax.bar(X - width, UY1, width=width, label='LSTM')
ax.bar(X, Y1, width=width, label='迁移')
ax.bar(X + width, AY1, width=width, label='AS-MFS')
ax.legend(loc=0, fontsize=15, frameon=False)  # legend position: automatic

# Zoom inset (picture-in-picture), currently disabled ========================
# axins = inset_axes(ax, width="50%", height="30%", loc='lower left',
#                    bbox_to_anchor=(0.3, 0.6, 1, 1),
#                    bbox_transform=ax.transAxes)
# axins.bar(X - width, UY1, width=width, label='LSTM')
# axins.bar(X, Y1, width=width, label='迁移')
# axins.bar(X + width, AY1, width=width, label='AS-MFS')
#
# # zoom interval (sample-index range to magnify)
# zone_left = 10
# zone_right = 40
#
# # axis padding ratios (tune to the data)
# x_ratio = 0.05  # x-axis padding ratio
# y_ratio = 0.05  # y-axis padding ratio
#
# # x-axis display range of the inset
# xlim0 = X[zone_left] - (X[zone_right] - X[zone_left]) * x_ratio
# xlim1 = X[zone_right] + (X[zone_right] - X[zone_left]) * x_ratio
#
# # y-axis display range of the inset
# y = np.hstack((UY1[zone_left:zone_right], Y1[zone_left:zone_right]))
# ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
# ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
#
# # apply limits to the inset axes
# axins.tick_params(labelsize=10)
# axins.set_xlim(xlim0, xlim1)
# axins.set_ylim(ylim0, ylim1)
#
# mark_inset(ax, axins, loc1=3, loc2=4, fc="none", ec='k', lw=1)

# NOTE(review): savefig after plt.show() can write an empty file on some
# interactive backends — consider saving before show.
plt.show()
figCl_Cd_error.savefig("figCl_Cd_error.svg", bbox_inches='tight')

# Cd absolute-error comparison ===============================================================
figCd_error, ax = plt.subplots(1, 1, figsize=(7, 5))

# ax = plt.gca()
# ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')
# ax.get_yaxis().get_offset_text().set(va='bottom', ha='left')
# ax.yaxis.get_offset_text().set_fontsize(15)  # size/position of the scientific-notation offset label
# # plt.subplots_adjust(left=0.14)  # use when the figure is clipped

plt.title('绝对值误差对比', fontsize=15)
plt.tick_params(labelsize=labelsize)
plt.xlabel('样本编号', fontsize=15)
plt.ylabel('Cd 绝对值误差', fontsize=15)
# plt.ylim(0, 0.009)
# Grouped bars per sample: plain LSTM vs transfer vs AS-MFS.
plt.bar(X - width, UY2, width=width, label='LSTM')
plt.bar(X, Y2, width=width, label='迁移')
plt.bar(X + width, AY2, width=width, label='AS-MFS')
plt.legend(loc=0, fontsize=15, frameon=False)  # legend position: automatic

# Zoom inset (picture-in-picture), currently disabled =======================
# axins = inset_axes(ax, width="50%", height="30%", loc='lower left',
#                    bbox_to_anchor=(0.3, 0.6, 1, 1),
#                    bbox_transform=ax.transAxes)
# axins.bar(X - width, UY2, width=width, label='LSTM')
# axins.bar(X, Y2, width=width, label='迁移')
# axins.bar(X + width, AY2, width=width, label='AS-MFS')
#
# # zoom interval (sample-index range to magnify)
# zone_left = 10
# zone_right = 40
#
# # axis padding ratios (tune to the data)
# x_ratio = 0.05  # x-axis padding ratio
# y_ratio = 0.05  # y-axis padding ratio
#
# # x-axis display range of the inset
# xlim0 = X[zone_left] - (X[zone_right] - X[zone_left]) * x_ratio
# xlim1 = X[zone_right] + (X[zone_right] - X[zone_left]) * x_ratio
#
# # y-axis display range of the inset
# y = np.hstack((UY2[zone_left:zone_right], Y2[zone_left:zone_right]))
# ylim0 = np.min(y) - (np.max(y) - np.min(y)) * y_ratio
# ylim1 = np.max(y) + (np.max(y) - np.min(y)) * y_ratio
#
# # apply limits to the inset axes
# axins.tick_params(labelsize=10)
# axins.set_xlim(xlim0, xlim1)
# axins.set_ylim(ylim0, ylim1)
#
# mark_inset(ax, axins, loc1=3, loc2=4, fc="none", ec='k', lw=1)

# NOTE(review): savefig after plt.show() can write an empty file on some
# interactive backends — consider saving before show.
plt.show()
figCd_error.savefig("figCd_error.svg", bbox_inches='tight')
