import numpy as np
import matplotlib.pyplot as plt 
import math
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
digits = load_digits()
x = digits.data
ylabel = digits.target

# Show the first 64 digit images on an 8x8 grid of axes.
fig = plt.figure(figsize=(6, 6))  # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

# Each digit image is an 8x8 pixel grayscale array.
for idx, image in enumerate(digits.images[:64]):
    axes = fig.add_subplot(8, 8, idx + 1, xticks=[], yticks=[])
    axes.imshow(image, cmap=plt.cm.binary)

    # Annotate each cell with its ground-truth digit.
    axes.text(0, 7, str(digits.target[idx]))
print(digits)
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e**-x).

    Accepts a scalar or a numpy array. Clipping the exponent keeps
    ``np.exp`` from overflowing (and emitting RuntimeWarning) for large
    negative ``x``; inputs beyond the clip range already saturate to
    (effectively) 0 or 1 in double precision.
    """
    z = np.clip(x, -709.0, 709.0)  # np.exp overflows just past 709.78
    return 1.0 / (1.0 + np.exp(-z))
class Logistic:
    """One-vs-rest logistic regression trained by stochastic gradient ascent.

    Ten binary classifiers (one per digit class 0-9) are trained
    independently; prediction assigns each sample to the class whose
    sigmoid score is highest.
    """

    def __init__(self, data, label):
        self.data = data    # (n_samples, n_features) training matrix
        self.label = label  # (n_samples,) integer class labels in 0..9
        self.alpha = 0.05   # learning rate for gradient ascent
        self.data_number, self.n = np.shape(data)
        self.weight = np.ones((10, self.n))  # one weight row per class
        self.b = np.zeros(10)                # one bias per class

    @staticmethod
    def _sigmoid(z):
        # Clipping keeps np.exp finite for large |z| (overflow-safe sigmoid).
        return 1.0 / (1.0 + np.exp(-np.clip(z, -709.0, 709.0)))

    def train(self):
        """Fit the ten binary classifiers, one class at a time."""
        for num in range(10):
            # Relabel: 1 for the current class, 0 for every other class.
            binary = (self.label == num).astype(float)
            for _epoch in range(500):
                error_last = math.inf
                # Visit each training sample once per epoch, in random order.
                # BUGFIX: the original used the *position* drawn from the
                # shrinking index list to index the dataset directly, so it
                # only ever sampled a prefix of the data; pop the actual
                # sample id instead.
                remaining = list(range(self.data_number))
                while remaining:
                    pick = int(np.random.uniform(0, len(remaining)))
                    idx = remaining.pop(pick)
                    error = binary[idx] - self._sigmoid(
                        np.dot(self.data[idx], self.weight[num]) + self.b[num])
                    self.weight[num] += self.alpha * error * self.data[idx]
                    self.b[num] += self.alpha * error
                    # Early stop for this epoch once the error stops changing.
                    if abs(error_last - error) < 0.000001:
                        break
                    error_last = error

    def predict(self, predict_data):
        """Return the predicted class (0-9) for each row of ``predict_data``."""
        labels = np.zeros(np.shape(predict_data)[0])
        for i in range(len(labels)):
            # BUGFIX: add the bias once per classifier; the original
            # sum(w * x + b) added b once per feature (i.e. n*b).
            scores = [
                self._sigmoid(np.dot(self.weight[j], predict_data[i]) + self.b[j])
                for j in range(10)
            ]
            labels[i] = int(np.argmax(scores))
        return labels
# Split the data: first 80% of samples for training, last 20% for testing.
# NOTE(review): this is a sequential (unshuffled) split — fine here only if
# load_digits returns samples in class-balanced order; confirm if reused.
N = len(x)
N_train = int(N*0.8)
N_test = N - N_train
x_train=x[:N_train,:]
y_train=ylabel[:N_train]
x_test=x[N_train:, :]
y_test=ylabel[N_train:]
# Train the one-vs-rest logistic model and score both partitions.
logistic=Logistic(x_train,y_train)
logistic.train()
train_result=logistic.predict(x_train)
test_result=logistic.predict(x_test)
# NOTE(review): accuracy_score conventionally takes (y_true, y_pred); the
# arguments are swapped here, but accuracy is symmetric so the value is the same.
z1=accuracy_score(train_result,y_train)
z2=accuracy_score(test_result,y_test)
from sklearn.linear_model import LogisticRegression  # NOTE(review): imported but unused
digits = load_digits()

# Plot every misclassified test digit on an 8x8 grid:
# bottom-left text = true label, upper-left text = model prediction.
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
k = 0
for i in range(len(test_result)):
    if test_result[i] != y_test[i]:
        k += 1
        if k > 64:
            # The 8x8 grid has only 64 slots; add_subplot raises beyond that.
            break
        ax = fig.add_subplot(8, 8, k, xticks=[], yticks=[])
        # BUGFIX: test sample i is full-dataset sample N_train + i; the
        # original indexed images/targets with the test-relative i and so
        # displayed the wrong digits.
        ax.imshow(digits.images[N_train + i], cmap=plt.cm.binary)
        ax.text(0, 7, str(digits.target[N_train + i]))  # true label
        ax.text(0, 2, str(int(test_result[i])))         # predicted label
plt.show()