import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import tensorflow as tf
from keras import losses, Sequential
# 画图
import matplotlib.pyplot as plt
# 多表格联合显示
import seaborn as sns
# 数据处理
import pandas as pd

from tensorflow.python import keras
from tensorflow.python.keras import layers, optimizers, Sequential

# 回调函数
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras import callbacks
from sklearn import svm
from sklearn.model_selection import cross_val_score
# 导入ReduceLROnPlateau
from keras.callbacks import ReduceLROnPlateau

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Download the auto-MPG (fuel-efficiency) dataset from the UCI repository.
dataset_path = data_utils.get_file("auto-mpg-ljf.data",
                                   "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")

# Columns: fuel efficiency (miles per gallon), cylinder count, displacement,
# horsepower, weight, acceleration, model year, and origin code (1/2/3).
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']

# Read the raw .data file into a DataFrame, naming each column:
# - skipinitialspace=True: ignore whitespace right after the separator
# - na_values="?": the raw file marks missing values with '?'; map them to NaN
# - comment='\t': lines starting with a tab character are skipped as comments
# - sep=" ": fields are separated by spaces (default would be comma)
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                          na_values="?", comment='\t',
                          sep=" ", skipinitialspace=True)

# Work on a copy so the raw download stays untouched.
dataset = raw_dataset.copy()
dataset.tail()

# Show the last 5 rows.
print(dataset.tail())

# Inspect and clean missing data:
# isna() flags missing entries; dropna() removes incomplete rows.
dataset.head()
dataset.isna().sum()  # count missing values per column
dataset = dataset.dropna()  # drop rows containing any missing value
dataset.isna().sum()  # re-count to confirm nothing is missing

# Convert the categorical Origin column (1=USA, 2=Europe, 3=Japan) into
# one-hot indicator columns. pop() removes the column and returns it.
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1) * 1.0
dataset['Europe'] = (origin == 2) * 1.0
dataset['Japan'] = (origin == 3) * 1.0
dataset.tail()  # peek at the transformed table

# Split 80/20 into train and test sets (fixed seed for reproducibility).
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)

# Pairwise feature relationships.
# BUG FIX: the original passed diag_kind="None" (the string), which is not a
# valid seaborn option ({'auto', 'hist', 'kde', None}) and raises on current
# versions; pass the value None to leave the diagonal panels empty.
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind=None)
plt.show()

# Summary statistics, transposed for readability (MPG excluded — it is the target).
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
print(train_stats)

# Pop the MPG column off as the regression target y for both splits.
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')

# Recompute the input-feature statistics used for normalization below.
# BUG FIX: the original called train_stats.pop("MPG") again here, which raised
# KeyError — MPG was already removed from train_dataset just above.
train_stats = train_dataset.describe()
train_stats = train_stats.transpose()  # transpose: one row per feature


# 标准化数据 制作归一化函数 这里用的是Zscore方法
def norm(x, stats=None):
    """Z-score normalize *x* column-wise: (x - mean) / std.

    Args:
        x: DataFrame (or Series) of raw feature values.
        stats: optional statistics table as produced by
            ``df.describe().transpose()`` — must expose ``'mean'`` and
            ``'std'`` columns. Defaults to the module-level ``train_stats``
            so existing call sites are unchanged.

    Returns:
        The normalized data, same shape as ``x``.
    """
    if stats is None:
        stats = train_stats  # fall back to the globally computed train stats
    return (x - stats['mean']) / stats['std']


normed_train_data = norm(train_dataset)  # z-score normalize the training features
normed_test_data = norm(test_dataset)  # normalize test features using the TRAIN statistics


# keras.regularizers.l1(0.01)
# keras.regularizers.l2(0.01)
# keras.regularizers.l1_l2(l1=0.01, l2=0.01)
# class WarmupLR:
#     def __init__(self, optimizer, num_warm) -> None:
#         self.optimizer = optimizer
#         self.num_warm = num_warm
#         self.lr = [group['lr'] for group in self.optimizer.param_groups]
#         self.num_step = 0
#
#     def __compute(self, lr) -> float:
#         return lr * min(self.num_step ** (-0.5), self.num_step * self.num_warm ** (-1.5))
#
#     def step(self) -> None:
#         self.num_step += 1
#         lr = [self.__compute(lr) for lr in self.lr]
#         for i, group in enumerate(self.optimizer.param_groups):
#             group['lr'] = lr[i]


def build_model():
    """Build and compile an MLP regressor for MPG prediction.

    Architecture: two 50-unit ReLU hidden layers (with dropout between them)
    and a single linear output unit. Input width is the number of feature
    columns in the module-level ``train_dataset``.

    Returns:
        A compiled ``Sequential`` model (MSE loss, Adam optimizer,
        MAE and MSE metrics).
    """
    model = Sequential([
        layers.Dense(50, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
        # BUG FIX: the original placed Dropout(0.5) AFTER the final Dense(1)
        # output layer, randomly zeroing the regression prediction itself
        # during training. Dropout belongs between hidden layers.
        layers.Dropout(0.5),
        layers.Dense(50, activation=tf.nn.relu),
        layers.Dense(1),  # linear output for regression
    ])

    optimizer = optimizers.adam_v2.Adam(learning_rate=1e-3)
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mean_absolute_error', 'mean_squared_error'])  # track MAE and MSE
    return model


model = build_model()
model.summary()

# Sanity-check the untrained model: predict on the first 10 training rows.
example_batch = normed_train_data[:10]
print(example_batch)
example_result = model.predict(example_batch)
print(example_result)

EPOCHS = 300
# verbose: 0 = silent, 1 = progress bar, 2 = one line per epoch
history = model.fit(
    normed_train_data, train_labels,
    epochs=EPOCHS, validation_split=0.2, verbose=0
)

# Print the shapes of the train and test splits.
print(normed_train_data.shape, train_labels.shape)
print(normed_test_data.shape, test_labels.shape)

# Build a tf.data pipeline from the training split (used by the custom loop below).
train_db = tf.data.Dataset.from_tensor_slices((normed_train_data.values, train_labels.values))
train_db = train_db.shuffle(100).batch(32)  # shuffle, then batch by 32

# Cross-validation on the training data.
# BUG FIX: the original passed normed_test_data (a feature matrix of a
# different length) as the target vector and used SVC — a classifier — on the
# continuous MPG target. Use SVR with the matching training labels instead;
# note the scores cross_val_score reports for a regressor are R^2 values.
clf = svm.SVR(kernel='linear', C=1)
scores = cross_val_score(clf, normed_train_data, train_labels, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))

# Learning-rate schedule: halve the LR when val_loss plateaus for 2 epochs.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1)
# BUG FIX: the original called model.fit(train_dataset, test_dataset, ...),
# passing the test FEATURES as labels (mismatched lengths, wrong data). Fit on
# the normalized training data with a validation split so 'val_loss' exists
# for the callback; a few epochs are needed for the plateau logic to trigger.
model.fit(normed_train_data, train_labels, epochs=10,
          validation_split=0.2, verbose=0, callbacks=[reduce_lr])


class Network(keras.Model):
    """Fully-connected regression network.

    Two 50-unit ReLU hidden layers followed by a single linear output unit.
    """

    def __init__(self):
        super(Network, self).__init__()
        # Three dense layers, applied in order by call().
        self.fc1 = layers.Dense(50, activation='relu')
        self.fc2 = layers.Dense(50, activation='relu')
        self.fc3 = layers.Dense(1)

    def call(self, inputs, training=None, mask=None):
        """Forward pass: inputs -> fc1 -> fc2 -> fc3."""
        hidden = self.fc2(self.fc1(inputs))
        return self.fc3(hidden)


model = Network()  # instantiate the custom regression network
# build() creates the weight tensors; the batch dimension is left as None,
# and 9 is the input feature length (the one-hot-expanded feature columns).
model.build(input_shape=(None, 9))
model.summary()  # print layer/parameter summary
optimizer = tf.keras.optimizers.RMSprop(0.001)  # optimizer with a fixed learning rate

for epoch in range(200):  # 200 epochs
    for step, (x, y) in enumerate(train_db):  # one full pass over the training set
        # Gradient tape records operations for automatic differentiation.
        with tf.GradientTape() as tape:
            out = model(x)  # forward pass
            loss = tf.reduce_mean(losses.MSE(y, out))  # mean squared error (optimized)
            mae_loss = tf.reduce_mean(losses.MAE(y, out))  # mean absolute error (computed only)

        # Periodically print the training loss.
        if step % 10 == 0:
            print(epoch, step, float(loss))

        # Compute gradients of the loss w.r.t. the weights and apply the update.
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))


def plot_history(history):
    """Plot train/validation MAE and MSE curves from a Keras History object."""
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    epochs = hist['epoch'].to_numpy()

    # One figure per metric: (history key, y-axis label).
    for metric, ylabel in (
        ('mean_absolute_error', 'Mean Abs Error [MPG]'),
        ('mean_squared_error', 'Mean Square Error [$MPG^2$]'),
    ):
        plt.figure()
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.plot(epochs, hist[metric].to_numpy(), label='Train Error')
        plt.plot(epochs, hist['val_' + metric].to_numpy(), label='Val Error')
        plt.ylim()
        plt.legend()

    plt.show()


plot_history(history)  # curves for the run without early stopping
model = build_model()  # fresh model for the early-stopping run
# EarlyStopping:
# - monitor: quantity to watch ('val_loss' here, since we use a validation split)
# - patience: number of epochs without improvement tolerated before stopping
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=5)

history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
                    validation_split=0.2, verbose=0, callbacks=[early_stop])
plot_history(history)

# Evaluate on held-out test data the model has never seen.
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))

# Predict MPG for the test set; flatten the (n, 1) output to a 1-D vector.
test_predictions = model.predict(normed_test_data).flatten()
# Scatter plot of true vs. predicted values (ideal fit lies on the diagonal).
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0, plt.xlim()[1]])
plt.ylim([0, plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
plt.show()

# Histogram of the prediction errors.
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
plt.show()
