# -*- coding: utf-8 -*-
# Import libraries necessary for this project
# 载入此项目所需要的库
import numpy as np
import pandas as pd


def tanh(x):
    """Hyperbolic tangent activation function (an odd function)."""
    return np.tanh(x)


def tanh_derivative(x):
    """Derivative of tanh: f'(x) = 1 - f(x)^2."""
    t = np.tanh(x)
    return 1 - t ** 2


def sigmoid(x):
    """Logistic sigmoid activation: 1 / (1 + e^(-x))."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator


def sigmoid_derivative(x):
    """Derivative of the logistic sigmoid: f'(x) = f(x) * (1 - f(x)).

    Evaluates the sigmoid once instead of twice (the original computed
    sigmoid(x) two times per call).
    """
    s = 1.0 / (1.0 + np.exp(-x))
    return s * (1.0 - s)


class NeuralNetwork:
    """A fully-connected feed-forward network trained with stochastic
    back-propagation (one randomly chosen sample per update step)."""

    def __init__(self, layers, activation='sigmoid'):
        '''
        :param layers: node counts per layer, input -> hidden(s) -> output,
                       e.g. [13, 3, 1]
        :param activation: activation function name, 'sigmoid' or 'tanh'
        :raises ValueError: if the activation name is unknown (the original
                            silently did nothing and crashed later with an
                            AttributeError)
        '''
        if activation == 'tanh':
            self.activation = np.tanh
            self.activation_prime = lambda x: 1.0 - np.tanh(x) ** 2
        elif activation == 'sigmoid':
            def _sigmoid(x):
                return 1.0 / (1.0 + np.exp(-x))
            self.activation = _sigmoid
            self.activation_prime = lambda x: _sigmoid(x) * (1.0 - _sigmoid(x))
        else:
            raise ValueError("unknown activation: %r" % (activation,))

        # Weight matrices, one per layer transition, initialized uniformly
        # in (-1, 1).  The "+ 1" adds a bias node to each non-output layer.
        self.weights = []
        for i in range(1, len(layers) - 1):
            r = 2 * np.random.random((layers[i - 1] + 1, layers[i] + 1)) - 1
            self.weights.append(r)

        # Output-layer weights.  The index is computed explicitly instead of
        # reusing the loop variable: with no hidden layer (len(layers) == 2)
        # the loop above never runs and `i` would be undefined (NameError in
        # the original).
        last = len(layers) - 2
        r = 2 * np.random.random((layers[last] + 1, layers[last + 1])) - 1
        self.weights.append(r)

    def fit(self, X, Y, learning_rate=0.01, epochs=10000):
        """Train the network with single-sample stochastic gradient descent.

        :param X: 2-D array of training samples, one row per sample
        :param Y: target values, indexable by sample row
        :param learning_rate: step size for each weight update
        :param epochs: number of single-sample updates to perform
        """
        # Prepend a column of ones so the input-layer bias is learned as an
        # ordinary weight.
        X = np.hstack([np.ones((X.shape[0], 1)), X])

        for k in range(epochs):
            if k % 1000 == 0:
                print('epochs:', k)

            # Pick one of the m training samples uniformly at random.
            # (Renamed from `i`, which the original reused as the weight
            # index in the update loop below.)
            sample = np.random.randint(X.shape[0], high=None)
            a = [X[sample]]  # per-layer activations, starting with the input

            # Forward pass: column j of each weight matrix holds the weights
            # feeding node j of the next layer.
            for l in range(len(self.weights)):
                dot_value = np.dot(a[l], self.weights[l])
                a.append(self.activation(dot_value))

            # Output-layer delta.
            # NOTE(review): as in the original, activation_prime is applied
            # to the *activated* values a[-1] / a[l]; the textbook rule uses
            # the pre-activation input instead.  Preserved to keep training
            # behavior identical — confirm before "fixing".
            error = Y[sample] - a[-1]
            deltas = [error * self.activation_prime(a[-1])]

            # Back-propagate deltas from the last hidden layer to the first.
            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_prime(a[l]))

            # Reverse so deltas[j] lines up with self.weights[j].
            deltas.reverse()

            # Gradient step: the weight gradient is the outer product of a
            # layer's input activations and its output delta.
            for j in range(len(self.weights)):
                layer = np.atleast_2d(a[j])
                delta = np.atleast_2d(deltas[j])
                self.weights[j] += learning_rate * np.dot(layer.T, delta)

    def predict(self, x):
        """Forward-propagate one sample (a 1-D vector) through the network."""
        a = np.concatenate((np.ones(1), np.array(x)))  # prepend bias input
        for l in range(0, len(self.weights)):
            a = self.activation(np.dot(a, self.weights[l]))
        return a


if __name__ == '__main__':
    # Load the Boston housing dataset.
    data = pd.read_csv('boston_house_prices.csv')
    prices = data['MEDV']
    features = data.drop('MEDV', axis=1)

    # Descriptive statistics of the target variable (MEDV).
    minimum_price = prices.min()
    maximum_price = prices.max()
    mean_price = np.mean(prices)
    median_price = np.median(prices)
    std_price = np.std(prices)  # population std (ddof=0), matching the original

    # Show the calculated statistics.
    print("\nStatistics for Boston housing dataset:\n")
    print("Minimum price: ${:,.2f}".format(minimum_price))
    print("Maximum price: ${:,.2f}".format(maximum_price))
    print("Mean price: ${:,.2f}".format(mean_price))
    # Fixed: the original message was missing the colon.
    print("Median price: ${:,.2f}".format(median_price))
    print("Standard deviation of prices: ${:,.2f}".format(std_price))

    print("\nBoston housing dataset has {} data points with {} variables each.\n".format(*data.shape))

    # Shuffle and split into 80% training / 20% testing subsets.  A fixed
    # random_state keeps the split reproducible across runs.
    from sklearn.model_selection import train_test_split

    X_train, X_test, y_train, y_test = [
        np.array(part)
        for part in train_test_split(features, prices, test_size=0.20, random_state=0)
    ]
    print("Training and testing split was successful.\n")

    # Standardize features and targets to zero mean / unit variance.
    # (The original imported StandardScaler twice; once is enough.)
    from sklearn.preprocessing import StandardScaler

    ss_X = StandardScaler()
    ss_y = StandardScaler()

    X_train = ss_X.fit_transform(X_train)
    X_test = ss_X.transform(X_test)

    y_train = ss_y.fit_transform(y_train.reshape(-1, 1))
    y_test = ss_y.transform(y_test.reshape(-1, 1))

    # Network topology: 13 input features -> one hidden layer of 3 nodes
    # -> 1 output (the original comment incorrectly said "1 input").
    nn = NeuralNetwork([13, 3, 1])

    # Train the BP neural network to predict Boston house prices.
    nn.fit(X_train, y_train, learning_rate=0.01, epochs=10000)

    print('w:', nn.weights)  # weight matrices after training

    # Predict each test sample once (the original called nn.predict twice
    # per sample, doubling the forward-pass work).
    nn_y_predict = []
    for sample in X_test:
        prediction = nn.predict(sample)
        nn_y_predict.append(prediction)
        print(sample, prediction)
    nn_y_predict = np.array(nn_y_predict)  # shape (n_samples, 1)

    # Regression metrics, reported in the original (unscaled) price units.
    # (Dropped the unused r2_score import.)
    from sklearn.metrics import mean_squared_error, mean_absolute_error

    y_true = ss_y.inverse_transform(y_test)
    y_pred = ss_y.inverse_transform(nn_y_predict)
    print('\nThe Mean Squared Error (MSE) of BP Neural Network is',
          mean_squared_error(y_true, y_pred))
    # Fixed: "Absoluate" -> "Absolute" in the original message.
    print('The Mean Absolute Error (MAE) of BP Neural Network is',
          mean_absolute_error(y_true, y_pred))
    print('\nQuestion : How to decrease the MSE and MAE in order to increase the accuracy of prediction ?')
