# Given the watermelon data set below, train a neural network model and print the results
# X1 = [0.697,0.774,0.634,0.608,0.556,0.403,0.481,0.437,0.666,0.243,0.245,0.343,0.639,0.657,0.360,0.593,0.719]
# X2 = [0.460,0.376,0.264,0.318,0.215,0.237,0.149,0.211,0.091,0.267,0.057,0.099,0.161,0.198,0.370,0.042,0.103]
# Y = [1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0]

import numpy as np
from numpy import *
import matplotlib.pylab as plt
from pylab import mpl

from sklearn.neural_network import MLPClassifier

from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei']

X1 = [0.697,0.774,0.634,0.608,0.556,0.403,0.481,0.437,0.666,0.243,0.245,0.343,0.639,0.657,0.360,0.593,0.719]
X2 = [0.460,0.376,0.264,0.318,0.215,0.237,0.149,0.211,0.091,0.267,0.057,0.099,0.161,0.198,0.370,0.042,0.103]
Y = [1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0]

# Build the design matrix X (leading bias column of ones, then the two
# features) and the column label vector y.
m = len(X1)
X = np.c_[np.ones(m), X1, X2]
y = np.c_[Y]

# Shuffle the samples (fixed seed so the split is reproducible)
np.random.seed(3)
order = np.random.permutation(m)
X = X[order]
y = y[order]

# Split into training (first 75%) and test (remaining 25%) sets
d = int(0.75 * m)
train_X, test_X = np.split(X, [d, ])
train_y, test_y = np.split(y, [d, ])

# 定义simgmoid函数及导数
def sigmoid(x, deriv=False):
    """Logistic (sigmoid) activation function.

    With deriv=False, returns 1 / (1 + e^-x), elementwise for arrays.
    With deriv=True, the argument is assumed to ALREADY be a sigmoid
    output a, and the derivative a * (1 - a) is returned.
    """
    if not deriv:
        return 1.0 / (1.0 + np.exp(-x))
    return x * (1 - x)

# Initialize weights; network architecture: input L1=3 (bias + 2 features),
# hidden L2=17, output L3=1.  Weights drawn uniformly from [-1, 1).
np.random.seed(3)
theta1 = 2 * np.random.random((3, 17)) - 1
theta2 = 2 * np.random.random((17, 1)) - 1

# Cost history and learning rate
J_history = np.zeros(15000)
alpha = 0.1

# Number of TRAINING samples.  The original code normalized the cost and
# the gradients by the full dataset size m even though only the training
# split is used, which mis-scaled the reported cost.
n_train = train_X.shape[0]

# Batch gradient descent
for i in range(15000):
    # Forward propagation
    a1 = train_X
    z2 = a1.dot(theta1)
    a2 = sigmoid(z2)
    z3 = a2.dot(theta2)
    a3 = sigmoid(z3)
    h = a3

    # Cross-entropy cost over the training set
    J_history[i] = - 1.0 / n_train * (train_y.T.dot(np.log(h)) + (1 - train_y).T.dot(np.log(1 - h)))

    # Backpropagation (delta2 uses sigmoid'(a2) = a2 * (1 - a2))
    delta3 = a3 - train_y
    delta2 = delta3.dot(theta2.T) * sigmoid(a2, True)

    deltheta2 = 1.0 / n_train * a2.T.dot(delta3)
    deltheta1 = 1.0 / n_train * a1.T.dot(delta2)

    theta2 -= alpha * deltheta2
    theta1 -= alpha * deltheta1

# Plot the learning curve
plt.plot(J_history)
plt.show()


# Classification accuracy of the hand-rolled network
def accuracy(X, y, t1=None, t2=None):
    """Return the fraction of samples in (X, y) classified correctly.

    Parameters
    ----------
    X : ndarray, shape (m, 3) -- rows are [1, x1, x2] feature vectors
    y : ndarray, shape (m, 1) -- 0/1 labels (one per row)
    t1, t2 : optional weight matrices (shapes (3, 17) and (17, 1));
             default to the globally trained theta1 / theta2.

    Returns
    -------
    float in [0, 1].

    Fixes vs. the original:
    * `return count / m` was indented inside the loop, so only the
      first sample was ever evaluated;
    * the prediction/label comparison coerced an ndarray through
      bool(), comparing truthiness rather than class labels;
    * the per-sample debug print was removed.
    """
    if t1 is None:
        t1 = theta1
    if t2 is None:
        t2 = theta2
    m = X.shape[0]
    count = 0
    for i in range(m):
        # Forward pass for one sample (sigmoid inlined)
        a1 = X[i]
        a2 = 1.0 / (1.0 + np.exp(-a1.dot(t1)))
        a3 = 1.0 / (1.0 + np.exp(-a2.dot(t2)))
        # Threshold the output probability at 0.5
        pred = 1 if a3.item() >= 0.5 else 0
        if pred == int(np.asarray(y[i]).item()):
            count += 1
    return count / m

# Report the hand-written network's accuracy on the train and test splits
print('训练集的准确率： ', accuracy(train_X, train_y))
print('测试集的准确率： ', accuracy(test_X, test_y))

# Cross-check with scikit-learn's MLPClassifier.
# NOTE(review): only the training-set score is reported for clf; the
# test set is never scored with it -- confirm whether that is intended.
clf = MLPClassifier(max_iter=15000)
clf.fit(train_X, train_y.ravel())
print('训练集的准确率为：', clf.score(train_X, train_y))



