import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
'''
This program wraps the functions from the earlier version into a class.

It implements the required outputs, including:
1. probability outputs via softmax
2. the cross-entropy loss function
3. visualization of the misclassified points

The dataset is still make_moons;
multi-class classification is demonstrated in a separate program.
'''
def sigmoid(X):
    """Element-wise logistic function 1 / (1 + e^(-X)).

    The argument is clipped to +/-709 (the largest finite double exponent)
    so that np.exp cannot overflow for very negative inputs; the limit
    values 0.0 / 1.0 are approached instead of raising a RuntimeWarning.
    """
    return 1.0 / (1 + np.exp(-np.clip(X, -709.0, 709.0)))

class NeuralNetwork:
    """Small fully connected network: sigmoid hidden layers + softmax output.

    Fixed architecture (set in __init__): input -> 8 -> 4 -> 2, trained by
    plain batch back-propagation against one-hot targets.
    """

    def __init__(self, x=None):
        """Set hyper-parameters and infer the input dimension.

        Parameters
        ----------
        x : array-like of shape (n_samples, n_features), optional
            Only the column count is used.  When omitted, falls back to the
            module-level global ``x`` so the original ``NeuralNetwork()``
            call keeps working (backward compatible).
        """
        self.iteration = 2000   # default number of BP iterations
        self.step = 0.01        # default learning rate
        if x is None:
            x = globals()['x']  # legacy behaviour: read the script-level data
        self.input_dim = np.shape(x)[1]
        self.hidden_layer = 2                # number of hidden layers
        self.hidden_dim = np.array([8, 4])   # units per hidden layer
        self.output_dim = 2                  # number of classes

    @staticmethod
    def _sigmoid(X):
        """Element-wise logistic function; argument clipped so exp cannot overflow."""
        return 1.0 / (1 + np.exp(-np.clip(X, -709.0, 709.0)))

    def init_weight(self):
        """Initialise weights ~ N(0, 1)/sqrt(fan_in) and zero biases (call once)."""
        dims = [self.input_dim] + list(self.hidden_dim) + [self.output_dim]
        self.W = [np.random.randn(fan_in, fan_out) / np.sqrt(fan_in)
                  for fan_in, fan_out in zip(dims[:-1], dims[1:])]
        self.B = [np.zeros((1, fan_out)) for fan_out in dims[1:]]

    def forward(self, x):
        """Forward pass.

        Caches every layer's activation in ``self.z`` and converts the last
        (sigmoid-squashed) layer into probabilities with softmax; returns the
        (n_samples, output_dim) probability matrix whose rows sum to 1.
        """
        z = []
        a = x
        for layer in range(self.hidden_layer + 1):
            a = self._sigmoid(np.dot(a, self.W[layer]) + self.B[layer])
            z.append(a)
        self.z = z
        # softmax over the output activations (self.z[-1] aliases z[-1],
        # so the cache holds the probabilities too, as before)
        e = np.exp(z[-1])
        z[-1] = e / np.sum(e, axis=1, keepdims=True)
        return z[-1]

    def BP(self, x, y, iteration=None, step=None):
        """Train with batch back-propagation.

        Parameters
        ----------
        x : (n, input_dim) inputs;  y : (n, output_dim) one-hot targets.
        iteration, step : optional overrides of the defaults from __init__.

        Bug fix: the ``step`` argument is now actually used — the original
        always updated with ``self.step`` regardless of what was passed.
        """
        if iteration is None:   # `is None`, so 0 is a valid explicit value
            iteration = self.iteration
        if step is None:
            step = self.step
        self.x = x
        self.y = y
        for _ in range(iteration):
            self.forward(x)    # forward pass (fills self.z)
            self.evaluate()    # print loss / accuracy each iteration
            W, B, z = self.W, self.B, self.z
            # softmax + cross-entropy output error; the updates below ADD
            # step * (y - p) * input, i.e. gradient descent on the loss
            delta = [y - z[-1]]
            for layer in range(self.hidden_layer, -1, -1):
                if layer != self.hidden_layer:
                    # propagate error through the sigmoid derivative z(1-z)
                    delta.insert(0, z[layer] * (1 - z[layer])
                                 * np.dot(delta[0], W[layer + 1].T))
                inp = x if layer == 0 else z[layer - 1]
                W[layer] += step * np.dot(inp.T, delta[0])
                B[layer] += step * np.sum(delta[0], axis=0)

    def evaluate(self):
        """Print cross-entropy loss and accuracy for the cached forward pass."""
        z = self.z[-1]
        # cross-entropy loss (resolves the original "fixme" that still used
        # squared error); clip keeps log() away from zero
        L = -np.sum(self.y * np.log(np.clip(z, 1e-12, 1.0)))
        y_pred = np.argmax(z, axis=1)
        y_true = np.argmax(self.y, axis=1)
        # identical to sklearn.metrics.accuracy_score on 1-D label arrays,
        # without the third-party dependency
        acc = np.mean(y_pred == y_true)
        print("L = %f, acc = %f" % (L, acc))

def show_result(nn, x, y_true=None):
    """Plot ground truth and predictions; misclassified points are drawn red.

    Parameters
    ----------
    nn : NeuralNetwork
        A trained network (its ``forward`` yields softmax probabilities).
    x : ndarray of shape (n, 2)
        2-D sample coordinates.
    y_true : ndarray of shape (n,), optional
        True integer labels; defaults to the module-level global ``y``
        for backward compatibility with the original call sites.
    """
    if y_true is None:
        y_true = y  # legacy: fall back to the script-level labels
    probs = nn.forward(x)
    y_pred = np.argmax(probs, axis=1)

    # Per-point marker/colour: cluster 0 = blue circle, cluster 1 = green
    # triangle, misclassified = red.  (The original built marker/label
    # lists but never used them — every point was drawn with marker='o';
    # the markers are actually applied here.)
    markers = np.where(y_pred == 0, 'o', '^')
    colors = np.where(y_pred == 0, 'blue', 'green')
    colors[y_true != y_pred] = 'red'

    # plot ground truth
    plt.scatter(x[:, 0], x[:, 1], c=y_true, cmap=plt.cm.Spectral)
    plt.title("ground truth")
    plt.show()

    # plot predictions point by point (matplotlib needs one scatter call
    # per marker style)
    for i in range(len(y_pred)):
        plt.scatter(x[i, 0], x[i, 1], c=colors[i], marker=markers[i])
    plt.title("predicted" + "\n" + "Red: misclassification points")
    plt.show()

# --- generate sample data ---------------------------------------------
# Two interleaving half-moon clusters:
#   x : (200, 2) array of coordinates,  y : (200,) array of true labels
np.random.seed(0)
x, y = datasets.make_moons(200, noise=0.20)

# One-hot encode the labels as the network's training target.
t = np.zeros((x.shape[0], 2))
t[np.arange(x.shape[0]), y] = 1

nn = NeuralNetwork()
nn.init_weight()
nn.BP(x, t, 2000)
show_result(nn, x)

# Compare against sklearn's MLP configured with a similar architecture.
from sklearn.neural_network import MLPClassifier

clf = MLPClassifier(solver='adam', hidden_layer_sizes=(8, 4),
                    random_state=1, max_iter=3000)
clf.fit(x, y)
print("sklearn score: ", clf.score(x, y))