import os

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
from keras.optimizers import RMSprop
from keras import losses

from forward_neutron_network import ForwardNeutronNetwork


def load_dataset():
    """
    Download the UCI auto-MPG dataset (cached by Keras) and load it as a DataFrame.

    Also writes a CSV copy (`auto-mpg.csv`) to the current working directory
    for easy manual inspection.

    :return: raw dataset as a pandas DataFrame (a copy, safe to mutate)
    """
    # Fetch the file from the UCI repository unless it is already in the Keras cache.
    dataset_path = keras.utils.get_file("auto-mpg.data",
                                        "https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")

    # MPG (miles per gallon), cylinders, displacement, horsepower, weight,
    # acceleration, model year, origin.
    column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration', 'Model Year', 'Origin']

    # '?' marks missing values; lines whose first character is a tab are skipped
    # as comments; fields are whitespace-separated.
    frame = pd.read_csv(dataset_path, names=column_names, na_values="?", comment='\t', sep=" ",
                        skipinitialspace=True)
    # Dump a CSV copy so the raw data can be inspected directly.
    frame.to_csv(os.getcwd() + "/auto-mpg.csv", index=False)

    return frame.copy()


def preprocess_dataset(dataset):
    """
    Clean the raw dataset and split it into train/test partitions.

    Drops rows containing missing values, one-hot encodes the categorical
    'Origin' column (1=USA, 2=Europe, 3=Japan), writes a cleaned CSV copy
    (`auto-mpg-clean.csv`) to the working directory, and finally takes a
    deterministic 80/20 train/test split.

    :param dataset: raw DataFrame including an 'Origin' column
    :return: (train_dataset, test_dataset)
    """
    cleaned = dataset.copy()

    # Discard rows with any missing value.
    cleaned = cleaned.dropna()

    # pop() removes 'Origin' from the frame and returns it, so the original
    # categorical column is replaced by the three indicator columns below.
    origin = cleaned.pop('Origin')
    for code, country in ((1, 'USA'), (2, 'Europe'), (3, 'Japan')):
        cleaned[country] = (origin == code) * 1.0

    # Keep a cleaned CSV copy for inspection.
    cleaned.to_csv(os.getcwd() + "/auto-mpg-clean.csv")

    # random_state=0 makes the 80/20 split reproducible across runs.
    train_part = cleaned.sample(frac=0.8, random_state=0)
    test_part = cleaned.drop(train_part.index)

    return train_part, test_part


def draw_cylinders_mpg_picture(dataset):
    """
    Scatter-plot cylinder count against fuel efficiency (MPG).

    :param dataset: DataFrame with 'Cylinders' and 'MPG' columns
    :return: None (shows the figure)
    """
    cylinders = dataset['Cylinders']
    mpg = dataset['MPG']
    # Debug output of the two series being plotted.
    print(cylinders)
    print(mpg)
    plt.scatter(x=cylinders, y=mpg)
    plt.title("Cylinders - MPG")
    plt.xlabel('Cylinders')
    plt.ylabel('MPG')
    plt.show()


def draw_displacement_mpg_picture(dataset):
    """
    Scatter-plot engine displacement against fuel efficiency (MPG).

    :param dataset: DataFrame with 'Displacement' and 'MPG' columns
    :return: None (shows the figure)
    """
    displacement = dataset['Displacement']
    mpg = dataset['MPG']
    # Debug output of the two series being plotted.
    print(displacement)
    print(mpg)
    plt.scatter(x=displacement, y=mpg)
    plt.title("Displacement - MPG")
    plt.xlabel('Displacement')
    plt.ylabel('MPG')
    plt.show()


def draw_weight_mpg_picture(dataset):
    """
    Scatter-plot vehicle weight against fuel efficiency (MPG).

    (The original docstring said "displacement" — a copy-paste slip; this
    function plots the 'Weight' column.)

    :param dataset: DataFrame with 'Weight' and 'MPG' columns
    :return: None (shows the figure)
    """
    weight = dataset['Weight']
    mpg = dataset['MPG']
    # Debug output of the two series being plotted.
    print(weight)
    print(mpg)
    plt.scatter(x=weight, y=mpg)
    plt.title("Weight - MPG")
    plt.xlabel('Weight')
    plt.ylabel('MPG')
    plt.show()


def draw_average(dataset):
    """
    Plot the average MPG for each distinct cylinder count.

    :param dataset: DataFrame with 'Cylinders' and 'MPG' columns
    :return: None (shows the figure)
    """
    array = dataset[['Cylinders', 'MPG']].to_numpy()
    print(array)

    # Unique cylinder counts, sorted, taken from the first column.
    cylinders = np.unique(array[:, 0])
    print(cylinders)

    # Mean MPG per cylinder count via boolean-mask grouping.
    # (Removed the unused `results` dict and the dead `sum` list, which
    # shadowed the builtin `sum`.)
    average_mpg = [np.mean(array[array[:, 0] == value, 1]) for value in cylinders]

    plt.plot(cylinders, average_mpg)
    plt.show()


def draw_nation_average(dataset):
    """
    Plot the average MPG per cylinder count, restricted to Japanese cars.

    :param dataset: DataFrame with 'Cylinders', 'MPG' and a one-hot 'Japan' column
    :return: None (shows the figure)
    """
    data = dataset[['Cylinders', 'MPG', 'Japan']].to_numpy()
    print(data)

    # Keep only rows where the Japan indicator equals 1, then drop that column.
    data = data[data[:, 2] == 1, :2]
    print(data)

    # Unique cylinder counts, sorted, from the first column.
    cylinders = np.unique(data[:, 0])
    print(cylinders)

    average_mpg = []
    for value in cylinders:
        # Boolean mask selects the rows for this cylinder count.
        matching = data[data[:, 0] == value]
        average_mpg.append(np.mean(matching[:, 1]))

    plt.plot(cylinders, average_mpg)
    plt.show()


def split_dataset(dataset):
    """
    Split a DataFrame into deterministic 80% train / 20% test partitions.

    :param dataset: full DataFrame
    :return: (train_data, test_data)
    """
    # random_state=0 makes the sampling reproducible; the test partition is
    # everything the sample did not pick.
    train_part = dataset.sample(frac=0.8, random_state=0)
    return train_part, dataset.drop(train_part.index)


def norm(x, train_state):
    """
    Standardize data with precomputed statistics (z-score normalization).

    :param x: data to normalize
    :param train_state: mapping/DataFrame exposing 'mean' and 'std' entries
    :return: (x - mean) / std
    """
    mean = train_state['mean']
    std = train_state['std']
    return (x - mean) / std


def build_model():
    """
    Construct the forward network, build it, and print its layer summary.

    :return: the built ForwardNeutronNetwork instance
    """
    network = ForwardNeutronNetwork()
    # input_shape=(4, 9): presumably batch of 4 samples with 9 features each
    # — TODO confirm against ForwardNeutronNetwork's definition.
    network.build(input_shape=(4, 9))
    network.summary()
    return network


def train(model, train_db, optimizer, normed_test_data, test_label, epochs=200):
    """
    Train the model and record per-epoch MAE on train and test data.

    :param model: Keras-style model exposing __call__ and trainable_variables
    :param train_db: batched tf.data.Dataset yielding (features, labels) pairs
    :param optimizer: optimizer used to apply the gradients
    :param normed_test_data: normalized test features (DataFrame with .values)
    :param test_label: labels for the test data
    :param epochs: number of passes over train_db (default 200, as before)
    :return: (train_mae_error, test_mae_error) — per-epoch MAE lists of floats
    """
    train_mae_error = []
    test_mae_error = []

    # Test features are constant across epochs — convert to a tensor once.
    test_x = tf.constant(normed_test_data.values)

    for epoch in range(epochs):
        mae_loss = None  # MAE of the most recent batch in this epoch

        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                output = model(x)
                # MSE is the training objective (mean over the batch).
                loss = tf.reduce_mean(losses.MSE(y, output))
            # MAE is a reporting metric only — computing it outside the tape
            # keeps it off the gradient graph (the original taped it needlessly).
            mae_loss = tf.reduce_mean(losses.MAE(y, output))

            if step % 10 == 0:
                print(f'epoch={epoch}, step={step}, loss:{float(loss)}')

            # Backpropagate the MSE loss and update the weights.
            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

        # Record the last batch's training MAE (NOTE: not an epoch average).
        # Guard fixes a NameError the original hit when train_db was empty.
        if mae_loss is not None:
            train_mae_error.append(float(mae_loss))

        out = model(test_x)
        # NOTE(review): if the model outputs shape (N, 1) while test_label is
        # (N,), losses.MAE broadcasts — confirm shapes agree (squeeze if not).
        # Stored as float for consistency with train_mae_error (the original
        # appended a Tensor here).
        test_mae_error.append(float(tf.reduce_mean(losses.MAE(out, test_label))))

    return train_mae_error, test_mae_error


def plot_result(train_mae_error, test_mae_error):
    """
    Plot the train and test MAE curves over training epochs.

    :param train_mae_error: per-epoch training MAE values
    :param test_mae_error: per-epoch test MAE values
    :return: None (shows the figure)
    """
    plt.figure()
    # Both curves on a single axis; x is the implicit epoch index.
    plt.plot(train_mae_error, label='train')
    plt.plot(test_mae_error, label='test')
    plt.xlabel('Epoch')
    plt.ylabel('MAE')
    plt.legend()
    plt.show()


if __name__ == '__main__':
    # Download the raw auto-MPG data (also writes auto-mpg.csv to cwd).
    dataset = load_dataset()
    # Peek at the first 5 rows.
    print(dataset.head())
    # Peek at the last 5 rows.
    print(dataset.tail())

    # Clean + one-hot encode Origin, then split 80/20 (deterministic).
    train_dataset, test_dataset = preprocess_dataset(dataset)

    # Optional: pairwise feature visualization (requires seaborn).
    # sns_plot = sns.pairplot(train_dataset[["Cylinders", "Displacement", "Weight", "MPG"]], diag_kind="kde")

    # Per-column statistics of the training inputs X.
    # NOTE: order matters — describe() must run before MPG is popped from the
    # datasets below, and 'MPG' is removed from the stats so only features remain.
    train_stats = train_dataset.describe()
    train_stats.pop("MPG")
    # Transpose so each row is a feature with its mean/std/etc. columns.
    train_stats = train_stats.transpose()
    print(train_stats)

    # Pop the MPG column off both frames to use as the regression labels Y
    # (mutates train_dataset/test_dataset in place, leaving only features).
    train_labels = train_dataset.pop('MPG')
    test_labels = test_dataset.pop('MPG')

    # Standardize features with the TRAINING statistics (also for test data,
    # to avoid leaking test-set statistics).
    normed_train_data = norm(train_dataset, train_stats)
    normed_test_data = norm(test_dataset, train_stats)

    print(normed_train_data.shape, train_labels.shape)
    print(normed_test_data.shape, test_labels.shape)

    # Build the feed-forward network.
    model = build_model()

    optimizer = RMSprop(learning_rate=0.001)
    train_db = tf.data.Dataset.from_tensor_slices((normed_train_data, train_labels.values))

    # Shuffle with a buffer of 100 samples, then batch by 32.
    train_db = train_db.shuffle(100).batch(32)

    train_mae_error, test_mae_error = train(model=model, train_db=train_db, optimizer=optimizer,
                                            normed_test_data=normed_test_data, test_label=test_labels)
    plot_result(train_mae_error, test_mae_error)
