# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 14:20:31 2019

@author: XCL01
"""

from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
# No initial approach of my own; adapted from a classmate's solution.

def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + e^(-x)).

    The input is clipped to [-500, 500] so ``np.exp`` cannot overflow for
    very negative ``x``; within double precision the returned values are
    unchanged (sigmoid(+/-500) already rounds to 1.0 / ~7e-218).

    Parameters
    ----------
    x : float or ndarray
        Raw score(s).

    Returns
    -------
    float or ndarray in (0, 1).
    """
    z = np.clip(x, -500.0, 500.0)  # guard against overflow in exp
    return 1.0 / (1.0 + np.exp(-z))

def imageshow(digits, num=None):
    """Plot the first ``num`` digit images in a square grid.

    Parameters
    ----------
    digits : object with ``images`` (list of 2-D arrays) and ``target``
        (labels) attributes, e.g. the sklearn digits Bunch.
    num : int, optional
        Number of images to display; should be a perfect square so the
        grid is square. Defaults to the module-level ``test_num`` for
        backward compatibility with the original global-based call.
    """
    if num is None:
        num = test_num  # NOTE(review): legacy global fallback — prefer passing num explicitly
    side = int(round(np.sqrt(num)))  # add_subplot requires ints; np.sqrt returns a float

    fig = plt.figure(figsize=(7, 7))  # figure size in inches
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
    for i in range(num):
        ax = fig.add_subplot(side, side, i + 1, xticks=[], yticks=[])
        ax.imshow(digits.images[i], cmap=plt.cm.binary)
        ax.text(0, 1, str(digits.target[i]))  # overlay the true label in the corner
    
class logistic():
    """Binary logistic-regression classifier trained with stochastic
    gradient ascent.

    Interface (unchanged from the original):
        logistic(data, label) -> instance
        .train(iteration)     -> (weights, b)
        .predict(rows)        -> ndarray of 0/1 predictions
    """

    def __init__(self, data, label):
        # data: (n_samples, n_features) array; label: sequence of 0/1.
        self.data = data
        self.label = label
        self.data_num, n = np.shape(data)
        self.weights = np.ones(n)  # start from all-ones weights
        self.b = 1                 # scalar bias term

    def train(self, iteration):
        """Train with SGD, visiting each sample once per epoch.

        Bug fixes vs. the original:
        * ``np.random.seed(0)`` was called inside the inner loop, which
          reset the RNG before every draw so every "random" draw returned
          the same fraction. Seeding now happens once per call, keeping
          training deterministic while restoring real shuffling.
        * The random position was drawn from the shrinking ``data_index``
          list but then used to index ``self.data`` directly; it is now
          mapped through ``data_index`` so each epoch samples without
          replacement as intended.

        Parameters
        ----------
        iteration : int
            Number of full passes (epochs) over the training data.

        Returns
        -------
        (weights, b) : the fitted weight vector and scalar bias.
        """
        np.random.seed(0)  # deterministic training, seeded exactly once
        alpha = 0.01       # learning rate (hoisted out of the loops)
        for _ in range(iteration):
            data_index = list(range(self.data_num))
            for _ in range(self.data_num):
                # Pick a random position in the remaining-index list and
                # map it to the actual sample index.
                pos = int(np.random.uniform(0, len(data_index)))
                idx = data_index[pos]
                # NOTE: sum(x*w + b) adds b once per feature (i.e. the
                # effective bias is n*b). Kept as-is deliberately — the
                # scorer in multi_distribution uses the same convention.
                z = np.sum(self.data[idx] * self.weights + self.b)
                z = np.clip(z, -500.0, 500.0)      # avoid overflow in exp
                pred = 1.0 / (1.0 + np.exp(-z))    # logistic function
                error = self.label[idx] - pred
                self.weights += alpha * error * self.data[idx]
                self.b += alpha * error
                del data_index[pos]
        return self.weights, self.b

    def predict(self, predict_data):
        """Return an array of 0/1 predictions, one per row of predict_data.

        Uses the same sum(x*w + b) > 0 decision rule as training.
        """
        result = list(map(lambda x: 1 if np.sum(self.weights * x + self.b) > 0 else 0, predict_data))
        return np.array(result)

def multi_distribution(data, digits, class_weights=None, class_bias=None, num_classes=None):
    """One-vs-rest multi-class prediction plus accuracy report.

    For every row of ``data``, scores it against each binary classifier
    and predicts the argmax class, then compares with ``digits.target``.

    Parameters
    ----------
    data : (n_samples, n_features) array of samples to classify.
    digits : object with a ``target`` attribute holding the true labels
        for the first n_samples entries.
    class_weights, class_bias, num_classes : optional
        Per-class weight rows, bias rows, and class count. Default to the
        module-level ``weights``, ``b`` and ``target_num`` for backward
        compatibility with the original global-based call.

    Returns
    -------
    float : fraction of correctly classified samples.
    """
    if class_weights is None:
        class_weights = weights      # legacy module-level globals
    if class_bias is None:
        class_bias = b
    if num_classes is None:
        num_classes = target_num

    data_num = np.shape(data)[0]
    max_data_label = np.ones(data_num)
    for j in range(data_num):
        # Raw linear scores. The original wrapped these in sigmoid before
        # argmax; sigmoid is strictly increasing so the argmax is the
        # same, and skipping it avoids float saturation ties at 1.0.
        scores = np.array([np.sum(class_weights[i] * data[j] + class_bias[i])
                           for i in range(num_classes)])
        max_data_label[j] = np.argmax(scores)

    correct = 0  # accuracy bookkeeping
    for i in range(data_num):
        if max_data_label[i] == digits.target[i]:
            correct += 1
        print("original data = {} , predict data = {}".format(digits.target[i], max_data_label[i]))
    return correct / data_num
    

if __name__ == '__main__':
    # Entry point: train ten one-vs-rest logistic classifiers on the
    # scikit-learn digits data, then report classification accuracy.

    digits = load_digits()          # 8x8 grayscale digit images, 64 features each
    target_num = 10                 # number of classes (digits 0-9)

    # Train/test sizes. Both must be perfect squares so the preview grid
    # drawn by imageshow() comes out square.
    train_num, test_num = 100, 100

    iteration = 10000               # SGD epochs per binary classifier
    imageshow(digits)               # preview the first test_num images

    weights = np.ones((target_num, 64))   # one 64-dim weight row per class
    b = np.ones((target_num, 64))         # per-class bias (scalar broadcast into a row)

    # One-vs-rest training: relabel samples as 1 (this class) / 0 (rest)
    # and fit a plain binary logistic model for each digit.
    train_data = digits.data[0:train_num]
    for i in range(target_num):
        train_label = [1 if digits.target[k] == i else 0 for k in range(train_num)]
        Logistic = logistic(train_data, train_label)
        weights[i], b[i] = Logistic.train(iteration)

    # Evaluation. NOTE: the "test" samples are the same first 100 rows
    # used for training, so this measures training accuracy.
    test_data = digits.data[0:test_num]
    precision = multi_distribution(test_data, digits)
    print("final correct rate = {}".format(precision))