# coding=utf-8
import numpy as np
import random
import matplotlib.pyplot as plt

def softmax_regression(theta, x, y, iters, alpha, batch_size=60000, plot=True):
    """Train a softmax (multinomial logistic) regression model with mini-batch SGD.

    Shapes: x is (m, n) samples-by-features, y is (k, m) one-hot labels,
    returned theta is (k, n).

    Args:
        theta: ignored; kept for interface compatibility — theta is
            re-initialized internally to 1/n for faster convergence.
        x: training data, shape (m, n).
        y: one-hot labels, shape (k, m).
        iters: number of epochs.
        alpha: learning rate.
        batch_size: mini-batch size (clamped to m; the original hard-coded
            60000 and silently skipped training when m < 60000).
        plot: if True, show the loss curve with matplotlib after training.

    Returns:
        The learned parameter matrix theta, shape (k, n).
    """
    m, n = x.shape
    k = y.shape[0]
    # Re-initialize theta to a sensible constant to speed up training.
    theta = np.ones([k, n], float) * (1 / n)
    # Clamp so small datasets still form at least one batch.
    batch_size = min(batch_size, m)
    loss_trend = []  # loss recorded once per epoch

    for epoch in range(iters):
        # Batch start offsets; range step keeps the final partial batch
        # (the original dropped the m % batch_size remainder samples).
        starts = list(range(0, m, batch_size))
        random.shuffle(starts)  # visit batches in random order
        for begin in starts:
            end = min(begin + batch_size, m)
            xb = x[begin:end, :]          # (b, n)
            yb = y[:, begin:end]          # (k, b)
            # Recompute probabilities with the CURRENT theta for every batch
            # (the original computed them once per epoch, so later batches
            # used stale probabilities that ignored earlier updates).
            zb = np.dot(xb, theta.T)      # (b, k) scores
            zb = zb - zb.max(axis=1, keepdims=True)  # stabilize exp
            eb = np.exp(zb)
            pb = eb / eb.sum(axis=1, keepdims=True)  # (b, k) probabilities
            # Average cross-entropy gradient over the batch.
            grad = (1 / (end - begin)) * np.dot(pb.T - yb, xb)
            theta = theta - alpha * grad  # gradient-descent step

        # Epoch loss with the UPDATED theta (the original reused pre-update
        # probabilities, so the reported loss lagged one epoch behind).
        z = np.dot(x, theta.T)
        z = z - z.max(axis=1, keepdims=True)
        e = np.exp(z)
        y_proba = e / np.sum(e, axis=1, keepdims=True)  # (m, k)
        loss = (-1 / m) * np.sum(y * np.log(y_proba.T))
        loss_trend.append(loss)

        if epoch == 0:
            print(f"批处理大小：Batch_Size = {batch_size}")
        if epoch == 0 or (epoch + 1) % 20 == 0:
            print(f"第{epoch + 1}次迭代: 损失loss = {loss}")

    if plot:
        plt.plot(range(iters), loss_trend, color='orange', marker=' ',
                 linestyle='dashed', linewidth=1, markersize=4)
        plt.xlabel('epoch')
        plt.ylabel('loss')
        plt.title('loss_trend')
        plt.show()
    return theta