#!/usr/bin/env python
# coding: utf-8

# In[2]:


"""Ovr 多分类方法"""

"""加载数据"""
from sklearn.datasets import load_digits # 加载数据
import matplotlib.pyplot as plt # 可视化

# 加载数据
digits = load_digits()

# Preview the first 64 samples in an 8x8 grid of tiles; each tile shows
# the image with its true digit printed in the lower-left corner.
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for idx in range(64):
    tile = fig.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    tile.imshow(digits.images[idx], cmap=plt.cm.binary)
    tile.text(0, 7, str(digits.target[idx]))  # true label
plt.show()


# In[3]:


"""Ovr算法"""
import numpy as np
from scipy.optimize import minimize 

# Logistic (sigmoid) function.
def sigmoid(z):
    """Map any real value (or array of values) into the interval (0, 1)."""
    return 1 / (1 + np.exp(-z))

# Regularized logistic-regression cost (the bias theta[0] is not penalized).
def cost(theta, X, y, l):
    """Cross-entropy loss over (X, y) plus an L2 penalty scaled by l.

    The 1e-5 offsets keep np.log away from log(0) when the sigmoid
    saturates at exactly 0 or 1.
    """
    penalized = theta[1:]                      # skip the bias term
    h = sigmoid(X @ theta)                     # predicted probabilities
    loss = (-y * np.log(h + 1e-5)) + (y - 1) * np.log(1 - h + 1e-5)
    reg = (penalized @ penalized) * l / (2 * len(X))
    return np.mean(loss) + reg

# Gradient of the regularized cost above; passed as jac= to minimize.
def grad(theta, X, y, l):
    """Return d(cost)/d(theta) for regularized logistic regression."""
    m = len(X)
    data_grad = (1 / m) * X.T @ (sigmoid(X @ theta) - y)
    # Prepend a 0 so the bias term theta[0] receives no penalty.
    reg_grad = np.concatenate([np.array([0]), (l / m) * theta[1:]])
    return data_grad + reg_grad


# One-vs-Rest trainer: fits one regularized logistic classifier per class.
def onevsrest(X, y, learn, num):
    """Train `num` binary classifiers, one per class label 1..num.

    Returns a (num, n_features) array; row k-1 holds the fitted
    parameters of the classifier for class k.
    """
    all_theta = np.zeros((num, X.shape[1]))

    for cls in range(1, num + 1):
        # Relabel: 1 for samples of this class, 0 for everything else.
        binary_y = np.array([1 if label == cls else 0 for label in y])
        initial = np.zeros(X.shape[1])
        # TNC minimization with the analytic gradient as the Jacobian.
        result = minimize(fun=cost, x0=initial, args=(X, binary_y, learn),
                          method='TNC', jac=grad, options={'disp': True})
        all_theta[cls - 1, :] = result.x

    return all_theta

# Predict a 1-based class label for every row of X.
def predict(X, all_theta):
    """Return the predicted class (1..num_classes) for each sample in X.

    all_theta holds one classifier per row, so X @ all_theta.T gives the
    per-class decision scores. Because the sigmoid is strictly increasing,
    argmax over the raw scores equals argmax over the sigmoid
    probabilities — skipping the sigmoid avoids needless work and exp()
    overflow warnings for large scores, without changing the result.
    """
    scores = X @ all_theta.T               # (n_samples, num_classes)
    return np.argmax(scores, axis=1) + 1   # column index + 1 = class label



X0 = digits.data      # feature matrix: one flattened 8x8 image per row (1797, 64)
y0 = digits.target    # true digit label 0-9 for each sample
X = np.insert(X0, 0, 1, axis=1)   # prepend a bias column of ones -> (1797, 65)
y = y0.flatten()                  # ensure a 1-D label vector

# BUG FIX: onevsrest/predict work with 1-based class labels (1..num), but
# the digits targets are 0..9 — so digit 0 was never matched by any
# classifier and class 0 could never be predicted. Shift the labels up by
# one for training, then shift the predictions back down to 0..9.
all_theta = onevsrest(X, y + 1, 1, 10)
# all_theta: one row of fitted parameters per binary classifier
y_pred = predict(X, all_theta) - 1
accuracy = np.mean(y_pred == y)

print ('正确率 = {0}%'.format(accuracy * 100))


# In[4]:


"""预测分类结果的可视化"""

# Re-plot the first 64 samples, overlaying the true label (upper-left)
# and the predicted label (lower-left) on every image.
fig1 = plt.figure(figsize=(6, 6))  # figure size in inches
fig1.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for idx in range(64):
    tile = fig1.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    tile.imshow(np.abs(X0[idx].reshape(8, 8)), cmap=plt.cm.binary,
                interpolation='nearest')
    tile.text(0, 1, str(y0[idx]))      # true label
    tile.text(0, 7, str(y_pred[idx]))  # predicted label
plt.show()
# Upper text is the ground truth, lower text is the prediction.


# In[6]:


"""分类结果错误图片的可视化"""

fig2 = plt.figure(figsize=(6, 6))  # figure size in inches
fig2.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

# Redraw only the misclassified samples among the first 64, keeping each
# at its original grid position so the errors are easy to locate.
wrong = 0
for idx in range(64):
    if y0[idx] != y_pred[idx]:
        wrong += 1
        tile = fig2.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
        tile.imshow(np.abs(X0[idx].reshape(8, 8)), cmap=plt.cm.binary,
                    interpolation='nearest')
        tile.text(0, 1, str(y0[idx]))      # true label
        tile.text(0, 7, str(y_pred[idx]))  # predicted label
plt.show()
print(wrong)  # number of misclassified images in this preview


# In[ ]:




