"""
对比了自己编程实现的逻辑回归多分类模型和通过sklearn实现的逻辑回归多分类模型，
调用sklearn实现模型的方法准确度相对较高，且计算速度快，实时性较好，程序简单易读。
"""

import numpy as np
import random
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
import math

random.seed(0)  # Seed the global `random` module so SGD sampling in `logistic.train` is reproducible

# Load the 8x8 digits dataset (1797 samples, 64 features, labels 0-9).
digits = load_digits()
# Preview the first 64 source images in an 8x8 grid, each annotated with its label.
fig = plt.figure(figsize=(6, 6))
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(64):
    ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
    ax.imshow(digits.images[i], cmap=plt.cm.binary)
    ax.text(0, 7, str(digits.target[i]))
plt.figure(1)  # NOTE(review): figure 1 is already the current figure — this call is effectively a no-op
plt.show()

# 70/30 train/test split; fixed random_state makes the split reproducible.
x_train, x_test, y_train, y_test = train_test_split(digits.data,
                                                    digits.target,
                                                    train_size=0.7,
                                                    random_state=10)


# 自己编程实现Logistic Regression的多分类
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x) for a scalar or ndarray.

    The argument is clipped to [-500, 500] before exponentiation so that
    np.exp never overflows (the original raised a RuntimeWarning for very
    negative x); within that range the result is numerically unchanged,
    and outside it the output saturates at 0.0 / 1.0 exactly as before.
    """
    return 1 / (1 + np.exp(-np.clip(x, -500, 500)))


class logistic:
    """Hand-rolled one-vs-rest (OvR) logistic regression.

    Trains k independent binary classifiers (one per label) with
    per-sample stochastic gradient ascent, then predicts by taking the
    class whose classifier gives the highest sigmoid score.  Depends on
    the module-level `sigmoid` and on the global `random` module, so
    results are reproducible only under a fixed `random.seed`.
    """

    def __init__(self, data):
        self.data = data                   # feature matrix, (num_samples, num_features)
        self.data_num = np.shape(data)[0]  # number of samples
        self.n = np.shape(data)[1]         # number of features

    def train(self, k, label, max_d=150, arpha=0.01):
        """Fit k one-vs-rest classifiers with stochastic gradient ascent.

        Parameters
        ----------
        k : int
            Number of classes; should equal len(set(label)).
        label : sequence
            Per-sample class labels, aligned with `self.data` rows.
        max_d : int
            Number of full passes (epochs) over the data per classifier.
        arpha : float
            Learning rate (original author's spelling of "alpha" is kept
            for backward compatibility with keyword callers).

        Returns
        -------
        (w, b, label_set) : weights (k, n), biases (k,), and the label
        ordering that maps each row of `w`/`b` to a class.
        """
        self.label = label
        # NOTE(review): set() iteration order is arbitrary for general
        # label types; the returned label_set records the row->class map.
        self.label_set = list(set(label))
        w = np.ones((k, self.n))
        b = np.ones(k)
        for ki in range(k):
            for _epoch in range(max_d):
                # Visit every sample exactly once per epoch, in random order.
                data_index = list(range(self.data_num))
                for _ in range(self.data_num):
                    rand_index = random.sample(data_index, 1)[0]
                    # Binary target: 1 for the current class, 0 otherwise.
                    # Kept as a Python float (the original stored it in a
                    # (num_samples, 1) array, which made `error` a 1-element
                    # ndarray; assigning that into the scalar slot b[ki] is
                    # deprecated/rejected by NumPy >= 1.25).
                    target = 1.0 if self.label[rand_index] == self.label_set[ki] else 0.0
                    # Gradient-ascent step on the log-likelihood.
                    error = target - sigmoid(sum(w[ki] * self.data[rand_index]) + b[ki])
                    w[ki] = w[ki] + arpha * error * self.data[rand_index]
                    b[ki] = b[ki] + arpha * error
                    data_index.remove(rand_index)
        return w, b, self.label_set

    def test(self, w, b, test_label_set):
        """Predict one label per sample: argmax over the k OvR scores."""
        k = np.shape(w)[0]
        test_result = np.zeros(self.data_num)
        for i in range(self.data_num):
            p_best = -1 * np.inf
            for ki in range(k):
                # Score each class once (the original evaluated sigmoid
                # twice per winning comparison).
                p = sigmoid(sum(w[ki] * self.data[i]) + b[ki])
                if p > p_best:
                    p_best = p
                    test_result[i] = test_label_set[ki]
        return test_result

    def error_result(self, ture_label, test_label):
        """Return the indices where predictions differ from the true labels.

        (`ture_label` keeps the original, misspelled parameter name so
        keyword callers are not broken.)
        """
        return [i for i in range(len(ture_label)) if ture_label[i] != test_label[i]]


# Train the hand-rolled OvR model on the training split, then score it on
# both splits.  A fresh `logistic` wrapper is built around each data split
# so that `test` iterates over the matching samples.
w, b, train_label_set = logistic(x_train).train(k=10, label=y_train)
test_result = logistic(x_test).test(w, b, train_label_set)
train_result = logistic(x_train).test(w, b, train_label_set)
# Compute train/test accuracy against the true labels.
acc_train = accuracy_score(y_train, train_result)
acc_test = accuracy_score(y_test, test_result)
print("自己编程实现逻辑回归：\n训练集准确度:%f\n测试集准确度:%f" % (acc_train, acc_test))

# Accuracy comparison: same task with scikit-learn's LogisticRegression,
# configured to match the hand-rolled model (one-vs-rest, max_iter=150).
from sklearn.linear_model import LogisticRegression

model = LogisticRegression(solver='liblinear', C=0.05, max_iter=150, multi_class='ovr',
                           random_state=0)
model.fit(x_train, y_train)
train_result = model.predict(x_train)
test_result = model.predict(x_test)
acc_train = accuracy_score(y_train, train_result)
acc_test = accuracy_score(y_test, test_result)
print("sklearn实现逻辑回归：\n训练集准确度:%f\n测试集准确度:%f" % (acc_train, acc_test))

