# Environment setup: these variables must be set BEFORE TensorFlow is imported.
# KMP_DUPLICATE_LIB_OK: work around the "duplicate OpenMP runtime" crash on
# some Windows/conda installs. TF_CPP_MIN_LOG_LEVEL='2': silence TF INFO/WARN
# messages from the C++ backend.
import os

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
# NOTE(review): duplicate `import os` — harmless, but the second import is redundant.
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from matplotlib import pyplot as plt
# NOTE(review): `tensorflow.python.keras` is a private implementation path; the
# supported entry point is the public `tf.keras` API — confirm TF version before changing.
from tensorflow.python import keras
from tensorflow.python.keras import layers, optimizers, metrics, Sequential, losses, callbacks
from tensorflow.python.keras.utils import data_utils
# NOTE(review): `datasets`, `cifar10` and the second matplotlib alias `py` are
# never used below — candidates for removal if the rest of the file agrees.
from keras import datasets
from keras.datasets import cifar10
import matplotlib.pyplot as py
# pandas: data-handling library
import pandas as pd
import seaborn as sns

# Download (and cache under ~/.keras/datasets) the UCI Yeast dataset.
# NOTE(review): the cached filename says "auto-mpg" but the URL points at the
# *yeast* dataset — the name is misleading; verify which dataset is intended.
dataset_path = data_utils.get_file("auto-mpg-ljf.data",
                                   "http://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data")
# Column names assigned to the parsed table.
# NOTE(review): UCI yeast.data has 10 whitespace-separated fields (sequence
# name + 8 numeric features + class label) but only 8 names are listed here —
# confirm the parse yields the intended columns.
column_names = ['CYT', 'NUC', 'MIT', 'ME3', 'ME2',
                'ME1', 'EXC', 'VAC']

# Read the .data file into memory, attaching the column names above.
# skipinitialspace: ignore whitespace directly after the delimiter (default False)
# na_values: '?' in the raw data marks a missing value and is mapped to NaN
# comment='\t': lines starting with a tab character are not parsed
# sep: field separator (a single space here; the default would be comma)
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                          na_values="?", comment='\t',
                          sep=" ", skipinitialspace=True)

# Work on a copy so the raw download stays untouched
dataset = raw_dataset.copy()
dataset.tail()

# Show the last 5 rows
print("====查看数据====")
print(dataset.tail())

# Inspect missing data: isna() flags missing cells
dataset.head()
dataset.isna().sum()  # count missing values per column
dataset = dataset.dropna()  # drop rows that contain missing values
dataset.isna().sum()  # recount (should now be all zeros)


def prepross(x, y):
    """Normalize pixel values and cast labels for training.

    Args:
        x: tensor of raw pixel values, assumed in [0, 255] — TODO confirm,
           this helper is never called in this file.
        y: label tensor.

    Returns:
        Tuple ``(x, y)`` where ``x`` is float32 scaled to [-1, 0] and
        ``y`` is cast to int32.
    """
    # Bug fix: the original divided by 225 — a typo for 255, the maximum
    # 8-bit pixel value — which produced a slightly wrong scale.
    x = tf.cast(x, dtype=tf.float32) / 255. - 1
    y = tf.cast(y, dtype=tf.int32)
    return x, y


# Sanity check: missing-value counts after dropna (expect all zeros)
dataset.isna().sum()
print(dataset.isna().sum())

# Split into training (80%) and test (20%) sets; random_state=0 makes the
# split reproducible.
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
print("=======")
print(train_dataset)

# sns.pairplot visualizes pairwise relationships between the feature columns
sns.pairplot(train_dataset[['CYT', 'NUC', 'MIT', 'ME3', 'ME2', 'ME1', 'EXC', 'VAC']], diag_kind="None")
plt.show()

# Summary statistics of the training features, transposed for readability
print("======统计======")
train_stats = train_dataset.describe()
train_stats.pop("CYT")  # exclude the target column from the statistics
train_stats = train_stats.transpose()
print("统计")
print(train_stats)

# Separate the target ("label") column from the features; 'CYT' is the value
# the model is trained to predict.
train_labels = train_dataset.pop("CYT")
test_labels = test_dataset.pop("CYT")


# 模型
def build_model():
    model = Sequential([
        layers.Dense(50, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),
        layers.Dense(50, activation=tf.nn.relu),
        layers.Dense(50, activation=tf.nn.relu),
        layers.Dense(1)
    ])
    # 创建优化器
    optimizer = optimizers.adam_v2.Adam(learning_rate=1e-3)

    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['mean_absolute_error', 'mean_squared_error'])  # 度量的参数为mae（绝对值误差）和mse（均方误差）的loss
    return model


# Build an untrained model and print its layer/parameter summary
model = build_model()
model.summary()

# Smoke test: take a batch of 30 training rows and call model.predict on them
# before any training has happened.
example_batch = train_dataset[:30]  # first 30 rows (original comment said 10 — the slice takes 30)
print(example_batch)
example_result = model.predict(example_batch)  # untrained predictions, sanity check only
print("====预测====")
print(example_result)
EPOCHS = 300
# verbose: logging mode. 0 = silent, 1 = progress bar, 2 = one line per epoch
history = model.fit(
    train_dataset, train_labels,
    epochs=EPOCHS, validation_split=0.2, verbose=0
)


def plot_history(history):
    """Plot training/validation MAE and MSE curves from a Keras History.

    Opens two figures (one per metric), each with a 'Train Error' and a
    'Val Error' line over epochs, then shows them.
    """
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    epochs = hist['epoch'].to_numpy()

    # One figure per (metric key, y-axis label) pair
    for metric, ylabel in (('mean_absolute_error', 'Mean Abs Error [CYT]'),
                           ('mean_squared_error', 'Mean Square Error [$CYT^2$]')):
        plt.figure()
        plt.xlabel('Epoch')
        plt.ylabel(ylabel)
        plt.plot(epochs, hist[metric].to_numpy(),
                 label='Train Error')
        plt.plot(epochs, hist['val_' + metric].to_numpy(),
                 label='Val Error')
        plt.ylim()
        plt.legend()
    plt.show()


# Curves for the first (no-early-stop) training run
plot_history(history)
# Rebuild and retrain from scratch, this time with early stopping
model = build_model()
# EarlyStopping halts training when the monitored quantity stops improving.
# monitor: metric to watch ('val_loss' here, since a validation split exists;
#          alternatives include 'loss', 'acc', 'val_acc')
# patience: number of epochs without improvement tolerated before stopping
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=5)

history = model.fit(train_dataset, train_labels, epochs=EPOCHS,
                    validation_split=0.2, verbose=0, callbacks=[early_stop])
plot_history(history)
