import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from scipy.optimize import minimize

# 读入文件
# Load the MATLAB-format handwritten-digit dataset.
data = sio.loadmat('ex3data1.mat')
print(data)
# Besides the '__header__'/'__version__'/'__globals__' metadata keys,
# the dict holds:
#   'X': (5000, 400) float array — one flattened 20x20 image per row
#   'y': (5000, 1) uint8 labels in 1..10
#     (presumably 10 encodes the digit 0 — confirm against dataset docs)

print(data.keys())
# dict_keys(['__header__', '__version__', '__globals__', 'X', 'y'])

raw_X = data['X']
raw_y = data['y']
print(raw_X.shape)
# (5000, 400)
print(raw_y.shape)
# (5000, 1)


# 查看图片
# Display one randomly chosen digit image.
def plot_an_image(X):
    """Pick a random row of X and render it as a 20x20 grayscale image.

    X: (m, 400) array; each row is a flattened 20x20 image.
    """
    pick_one = np.random.randint(len(X))  # was hard-coded 5000; generalized to any dataset size
    image = X[pick_one, :]

    fig, ax = plt.subplots(figsize=(1, 1))
    # Transpose after reshaping so the digit is upright
    # (the data appears to be flattened column-major — the .T suggests so).
    ax.imshow(image.reshape(20, 20).T, cmap='gray_r')

    plt.xticks([])  # hide axis ticks
    plt.yticks([])

    plt.show()


plot_an_image(raw_X)



# 打印100张t图片
# Display a 10x10 grid of randomly chosen digit images.
def plot_100_image(X):
    """Sample 100 distinct rows of X and render them in a 10x10 grid.

    X: (m, 400) array; each row is a flattened 20x20 image.
    """
    # replace=False: sample without replacement so the grid never
    # shows the same image twice (the original could pick duplicates).
    sample_index = np.random.choice(len(X), 100, replace=False)
    images = X[sample_index, :]
    print(images.shape)
    # (100, 400)

    fig, ax = plt.subplots(ncols=10, nrows=10, figsize=(8, 8), sharex=True, sharey=True)

    for r in range(10):
        for c in range(10):
            # Transpose to orient each digit upright.
            ax[r, c].imshow(images[10 * r + c].reshape(20, 20).T, cmap='gray_r')

    plt.xticks([])  # hide ticks (axes are shared, so this clears all)
    plt.yticks([])

    plt.show()


plot_100_image(raw_X)


def sigmoid(z):
    """Logistic function: maps any real z (scalar or array) into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)


# 损失函数
def costFunction(theta, X, y, lamda):
    A = sigmoid(X @ theta)
    first = y * np.log(A)
    second = (1 - y) * np.log(1 - A)
    reg = theta[1:] @ theta[1:] * (lamda / (2 * len(X)))
    return -np.sum(first + second) / len(X) + reg


# 梯度下降
# Gradient of the regularized logistic-regression cost.
def gradient_reg(theta, X, y, lamda):
    """Return d(cost)/d(theta); the bias term theta[0] is not regularized."""
    m = len(X)
    error = sigmoid(X @ theta) - y
    grad = (X.T @ error) / m
    # Regularization term, with a 0 prepended so the bias is untouched.
    penalty = np.insert(theta[1:] * (lamda / m), 0, values=0, axis=0)
    return grad + penalty


# Prepend a bias column of ones: (5000, 400) -> (5000, 401).
X = np.hstack([np.ones((raw_X.shape[0], 1)), raw_X])
print(X.shape)
# (5000, 401)

# Flatten the labels to a 1-D vector for the optimizer.
y = raw_y.flatten()
print(y.shape)
# (5000,)


def one_vs_all(X, y, lamda, K):
    """Train K one-vs-rest regularized logistic classifiers.

    X: (m, n) design matrix (bias column included); y: (m,) labels in 1..K;
    lamda: regularization strength; K: number of classes.
    Returns a (K, n) matrix whose row k-1 holds the parameters of the
    classifier separating label k from all other labels.
    """
    n_features = X.shape[1]
    theta_all = np.zeros((K, n_features))

    for label in range(1, K + 1):  # labels 1..K
        # Binary sub-problem: this label vs. everything else.
        res = minimize(
            fun=costFunction,
            x0=np.zeros(n_features,),
            args=(X, y == label, lamda),
            method='TNC',
            jac=gradient_reg,
        )
        theta_all[label - 1, :] = res.x

    return theta_all

# Train the K one-vs-rest classifiers.
K = 10     # number of classes (labels 1..10)
lamda = 1  # regularization strength
theta_final = one_vs_all(X, y, lamda, K)
print(theta_final)


# 预测函数
def predict(X, theta_final):
    h = sigmoid(X @ theta_final.T)  # X:(5000,401)  theta:(10,401) => 转置：(5000,10)
    h_argmax = np.argmax(h, axis=1)
    return h_argmax + 1


# Evaluate training-set accuracy: fraction of labels predicted exactly.
y_pred = predict(X, theta_final)

acc = np.mean(y_pred == y)
print(acc)
# accuracy: 0.9446
















