#!/usr/bin/env python
# coding: utf-8

# In[1]:


"""设计三层神经网络实现digits多分类"""

"""导入手写数据集"""

from sklearn.datasets import load_digits # 加载数据
import matplotlib.pyplot as plt # 可视化

# 加载数据
# Load the handwritten-digits dataset
digits = load_digits()

# Show the first 64 digit images in an 8x8 grid, each annotated with
# its ground-truth label in the lower-left corner.
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for idx in range(64):
    panel = fig.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    panel.imshow(digits.images[idx], cmap=plt.cm.binary)
    panel.text(0, 7, str(digits.target[idx]))
plt.show()

data = digits.data      # feature matrix, one flattened 8x8 image per row
target = digits.target  # true class label (0-9) for each sample


# In[2]:


"""建立神经网络"""

import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

# 定义激活函数
def sigmoid(x):
    """Logistic activation: squash x (scalar or ndarray) into (0, 1) element-wise."""
    return 1.0 / (1.0 + np.exp(-x))

# 激活函数一阶导数
def dsigmoid(x):
    """Sigmoid derivative expressed in terms of the sigmoid's *output*.

    Note: x here is expected to already be sigmoid(z), so the derivative
    is simply x * (1 - x); no extra call to sigmoid is needed.
    """
    return x * (1.0 - x)
 
# 建立三层神经网络模型  
class NN_Model:
    """Three-layer (input -> hidden -> output) neural network.

    Uses sigmoid activations on both layers, a binary-cross-entropy-style
    loss over a one-hot label matrix with L2 regularization, and trains
    with mini-batch gradient descent.  Predictions are the argmax over
    the output units.
    """

    def __init__(self, insize, hidsize, outsize):
        """Initialize small random weights and zero biases.

        insize  -- number of input features
        hidsize -- number of hidden units
        outsize -- number of output units (classes)
        """
        self.W1 = 0.01 * np.random.randn(insize, hidsize)   # input -> hidden weights
        self.b1 = np.zeros(hidsize)                         # hidden-layer bias
        self.W2 = 0.01 * np.random.randn(hidsize, outsize)  # hidden -> output weights
        self.b2 = np.zeros(outsize)                         # output-layer bias

    def loss(self, X, y, reg=0.01):
        """Run one forward/backward pass over a batch.

        X   -- (num_train, insize) input batch
        y   -- (num_train, outsize) one-hot label matrix
        reg -- L2 regularization strength

        Returns (loss, dW1, dW2, db1, db2); gradients are averaged over
        the batch, matching the averaged loss.
        """
        num_train = X.shape[0]

        # Forward pass
        a1 = X                                    # input activations
        a2 = sigmoid(a1.dot(self.W1) + self.b1)   # hidden activations
        a3 = sigmoid(a2.dot(self.W2) + self.b2)   # output activations

        # Cross-entropy loss.  Clip the outputs away from 0 and 1 so that
        # np.log never produces -inf/NaN when a unit saturates (bug fix:
        # the original could return NaN loss on saturated outputs).
        eps = 1e-12
        a3_safe = np.clip(a3, eps, 1.0 - eps)
        loss = -np.sum(y * np.log(a3_safe) + (1 - y) * np.log(1 - a3_safe)) / num_train
        loss += 0.5 * reg * (np.sum(self.W1 * self.W1) + np.sum(self.W2 * self.W2)) / num_train

        # Backward pass: sigmoid + cross-entropy gives the simple (a3 - y)
        # error signal at the output layer.
        error3 = a3 - y
        dW2 = a2.T.dot(error3) + reg * self.W2
        db2 = np.sum(error3, axis=0)

        error2 = error3.dot(self.W2.T) * dsigmoid(a2)
        dW1 = a1.T.dot(error2) + reg * self.W1
        db1 = np.sum(error2, axis=0)

        # Average gradients over the batch.
        dW1 /= num_train
        dW2 /= num_train
        db1 /= num_train
        db2 /= num_train

        return loss, dW1, dW2, db1, db2

    def train(self, X, y, y_train, X_val, y_val, learn_rate=0.01,
              num_iters=10000, batch_size=150):
        """Train with mini-batch gradient descent.

        X          -- training inputs
        y          -- one-hot training labels (fed to the loss)
        y_train    -- integer training labels (for accuracy tracking)
        X_val      -- validation inputs
        y_val      -- integer validation labels
        learn_rate -- gradient-descent step size
        num_iters  -- number of update iterations
        batch_size -- mini-batch size (was hard-coded to 150; now a
                      backward-compatible parameter with the same default)

        Returns (loss_list, accuracy_train, accuracy_val); accuracies are
        sampled every 500 iterations.
        """
        ntrain = X.shape[0]
        loss_list = []
        accuracy_train = []
        accuracy_val = []

        for i in range(num_iters):
            # Sample a mini-batch with replacement.
            batch_index = np.random.choice(ntrain, batch_size, replace=True)
            X_batch = X[batch_index]
            y_batch = y[batch_index]
            y_train_batch = y_train[batch_index]

            loss, dW1, dW2, db1, db2 = self.loss(X_batch, y_batch)
            loss_list.append(loss)

            # Gradient-descent weight update.
            self.W1 += -learn_rate * dW1
            self.W2 += -learn_rate * dW2
            self.b1 += -learn_rate * db1
            self.b2 += -learn_rate * db2

            if i % 500 == 0:
                print("i=%d,loss=%f" % (i, loss))
                # Record train (on the current batch) and validation accuracy.
                train_acc = np.mean(y_train_batch == self.predict(X_batch))
                val_acc = np.mean(y_val == self.predict(X_val))
                accuracy_train.append(train_acc)
                accuracy_val.append(val_acc)

        return loss_list, accuracy_train, accuracy_val

    def predict(self, X_test):
        """Forward pass only; return the argmax class index per sample."""
        a2 = sigmoid(X_test.dot(self.W1) + self.b1)
        a3 = sigmoid(a2.dot(self.W2) + self.b2)
        y_pred = np.argmax(a3, axis=1)
        return y_pred


# In[3]:


"""训练模型并实现多分类预测"""

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import numpy as np

X = data
y = target

# Center the features (zero mean per pixel).  Use a copy rather than an
# in-place subtraction: X aliases digits.data, so the original `X -= X_mean`
# silently mutated the loaded dataset.
X = X - np.mean(X, axis=0)

X_data, X_test, y_data, y_test = train_test_split(X, y, test_size=0.2)

# Fixed split of the non-test data: first 1000 samples for training,
# everything after for validation.  Bug fix: the original used [1000:-1],
# which silently dropped the last validation sample.
X_train = X_data[:1000]
y_train = y_data[:1000]
X_val = X_data[1000:]
y_val = y_data[1000:]

print(X_train.shape, X_val.shape, X_test.shape)

# One-hot encode the training labels for the cross-entropy loss.
y_train_label = LabelBinarizer().fit_transform(y_train)

# Build the network: 64 inputs, 100 hidden units, 10 output classes.
classify = NN_Model(X.shape[1], 100, 10)

print('start')

loss_list, accuracy_train, accuracy_val = classify.train(X_train, y_train_label,
                                                         y_train, X_val, y_val)


# In[5]:


"""分类精度"""

# Evaluate on the held-out test set: fraction of correct argmax predictions.
y_pred = classify.predict(X_test)
accuracy = np.mean(y_pred == y_test)
print("the accuracy is ", accuracy)


# In[6]:


"""预测分类结果的可视化"""

# 可视化图像参数设定
# 8x8 grid of the first 64 test images.  Each panel is annotated with the
# true label near the top-left and the predicted label near the bottom-left.
fig1 = plt.figure(figsize=(6, 6))
fig1.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for idx in range(64):
    panel = fig1.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    panel.imshow(np.abs(X_test[idx].reshape(8, 8)), cmap=plt.cm.binary,
                 interpolation='nearest')
    panel.text(0, 1, str(y_test[idx]))   # true label (upper text)
    panel.text(0, 7, str(y_pred[idx]))   # predicted label (lower text)
plt.show()
# 上标为真实标签，下标为预测标签


# In[7]:


"""分类结果错误图片的可视化"""

# Show only the misclassified images among the first 64 test samples,
# each kept at its original grid position, and count how many there are.
fig2 = plt.figure(figsize=(6, 6))
fig2.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

num = 0
for idx in range(64):
    if y_test[idx] == y_pred[idx]:
        continue  # correctly classified; leave the grid cell empty
    num = num + 1
    panel = fig2.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    panel.imshow(np.abs(X_test[idx].reshape(8, 8)), cmap=plt.cm.binary,
                 interpolation='nearest')
    panel.text(0, 1, str(y_test[idx]))   # true label (upper text)
    panel.text(0, 7, str(y_pred[idx]))   # predicted label (lower text)
plt.show()
print(num)  # number of errors among the 64 displayed images


# In[ ]:




