import numpy as np
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_digits
'''
This program wraps the functions from the earlier version into a class.
The data used is the sklearn digits dataset.
After repeatedly tuning the network hyper-parameters, a local optimum was found.
'''
def sigmoid(X):
    """Element-wise logistic function 1 / (1 + e^-X); accepts scalars or arrays."""
    return np.reciprocal(1.0 + np.exp(np.negative(X)))

class NeuralNetwork:
    def __init__(self):
        self.iteration=102
        self.step=0.001029
        # set the nn parameters
        self.input_dim = np.shape(x)[1] # 64 points
        self.hidden_layer = 1
        self.hidden_dim = np.array([200])
        self.output_dim = 10 #from 0~9

    def init_weight(self): # 初始化权重 只会使用一次
        W=[]
        B=[]
        for layer in range(self.hidden_layer + 1):
            if layer == 0:  # first hidden layer
                W.append(np.random.randn(self.input_dim, self.hidden_dim[0]) \
                            / np.sqrt(self.input_dim))
                B.append(np.zeros((1, self.hidden_dim[0])))
            elif layer == self.hidden_layer:  # output layer
                W.append(np.random.randn(self.hidden_dim[layer - 1], self.output_dim) \
                            / np.sqrt(self.hidden_dim[layer - 1]))
                B.append(np.zeros((1, self.output_dim)))
            else: # other hidden layers
                W.append(np.random.randn(self.hidden_dim[layer - 1], self.hidden_dim[layer]) \
                            / np.sqrt(self.hidden_dim[layer - 1]))
                B.append(np.zeros((1, self.hidden_dim[layer])))
            self.W=W
            self.B=B

    def forward(self,x): #前向计算 每次循环的开头都需要使用他和weight matrix计算当前输出
        z = []
        for layer in range(self.hidden_layer + 1):
            if layer == 0:
                z.append(sigmoid(np.dot(x, self.W[0]) + self.B[0]))
            else:
                z.append(sigmoid(np.dot(z[-1], self.W[layer]) + self.B[layer]))
        self.z=z
        # 使用softmax 函数将输出变成概率值，搭配cross entropy损失函数
        temp = np.sum(np.exp(z[-1]), axis=1)
        temp = np.array([temp]).T  # 求和的结果生成二维列向量
        z[-1] = np.exp(z[-1]) / temp
        return z[-1]

    def BP(self,x,y,iteration=None,step=None):
        # 在函数输入处留给自定义步长和迭代次数的空间，没有自定义则使用默认参数
        if not iteration:
            iteration=self.iteration
        if not step:
            step=self.step
        # 输入矩阵 x 和真实标签 y
        self.x=x
        self.y=y

        for i in range(iteration):
            self.forward(x) # 正向计算
            self.evaluate() # 精度评估
            #将前向计算得到的结果和上一轮的权值更新加入local variable
            W=self.W
            B=self.B
            z=self.z
            delta=[]
            for layer in range(self.hidden_layer, -1, -1):
                # 循环的次数应该是hidden_layer+1,可以和前面循环的W B 下标对应
                if layer == self.hidden_layer:  # output layer weight update
                    delta.append(z[-1] * (1 - z[-1]) * (y - z[-1]))
                    W[-1] += self.step * np.dot(z[self.hidden_layer - 1].T, delta[0])
                    B[-1] += self.step * np.sum(delta[0], axis=0)
                elif layer == 0:  # first hidden layer weight update
                    delta.insert(0, z[layer] * (1 - z[layer]) * (np.dot(delta[0], W[layer + 1].T)))
                    W[layer] += self.step * np.dot(x.T, delta[0])
                    B[layer] += self.step * np.sum(delta[0], axis=0)
                else:  # other hidden layers ...
                    delta.insert(0, z[layer] * (1 - z[layer]) * (np.dot(delta[0], W[layer + 1].T)))
                    W[layer] += self.step * np.dot(z[layer - 1].T, delta[0])
                    B[layer] += self.step * np.sum(delta[0], axis=0)

    def evaluate(self):
        z=self.z[-1]
        #fixme 把损失函数换成cross entropy
        L = 0.5 * np.sum((z -self.y) ** 2)
        y_pred = np.argmax(z, axis=1)
        y_true = np.argmax(self.y, axis=1)

        acc = accuracy_score(y_true, y_pred)
        print("L = %f, acc = %f" % (L, acc))

def showImage(nn, rawX, k, y_true):
    """Plot a k-by-k grid of randomly chosen digit images with their
    predicted labels; correctly predicted digits are drawn in grey,
    mispredictions in red. Saves the figure to 'Classification Result.jpg'
    and shows it.

    nn:     a trained NeuralNetwork (its forward() yields class probabilities)
    rawX:   image matrix, one flattened 8x8 digit per row
    k:      grid side length (k*k images are displayed)
    y_true: integer ground-truth labels aligned with rawX rows

    Fix vs. the original: the forward pass now uses the ``rawX`` parameter
    instead of accidentally reading the module-level ``raw_X`` global (the
    two only coincided because the caller passed that same global).
    """
    # Predicted label per row: argmax over the network's output probabilities.
    y_res = nn.forward(rawX)
    y_pred = np.argmax(y_res, axis=1)
    index = np.random.choice(len(rawX), k * k)
    color = ["Greys_r", "Reds_r"]
    # subplots creates the grid of axes; sharex/sharey share both axes
    # across all subplots.
    fig, ax = plt.subplots(k, k, sharex=True, sharey=True, figsize=(k, k))
    # Adjust the spacing between the subplots.
    fig.subplots_adjust(wspace=0.5, hspace=0.5)
    for i in range(k):
        for j in range(k):
            # Look up the sampled image and its predicted label.
            image = rawX[index[i * k + j], :]
            label = y_pred[index[i * k + j]]
            x = 0  # grey colormap when the prediction is correct
            if label != y_true[index[i * k + j]]:
                x = 1  # red colormap when the prediction is wrong
            ax[i][j].set_xlabel(label)
            ax[i][j].imshow(image.reshape(8, 8), cmap=color[x])
    plt.xticks([])  # hide x-axis tick values
    plt.yticks([])  # hide y-axis tick values
    plt.savefig('Classification Result.jpg')
    plt.show()

# generate sample data
data = load_digits()
raw_X = data.data    # pixel features, one flattened 8x8 image per row
raw_y = data.target  # integer digit labels
# Scale all feature values into the [0, 1] range.
raw_X -= raw_X.min()
raw_X /= raw_X.max()
x = raw_X
# One-hot encode y into a (1797, 10) matrix so each row can be compared
# against the 10 output neurons.
y = np.zeros((np.shape(raw_y)[0], 10))
y[np.arange(np.shape(raw_y)[0]), raw_y] = 1

np.random.seed(0)
nn = NeuralNetwork()
nn.init_weight()
nn.BP(x, y)
showImage(nn, raw_X, 8, raw_y)