# Multi-class logistic regression model (one-vs-all)
# 1. Import dependencies
import numpy as np
from utils.hypothesis import sigmoid
from scipy.optimize import minimize  # minimizes a multivariate scalar function
from utils.features import prepare_for_training


class LogicRegressionUtils:
    """One-vs-all (multi-class) logistic regression trained with scipy's CG optimizer.

    One binary classifier is fit per distinct label; prediction picks the
    class with the highest sigmoid probability.
    """

    def __init__(self, data, labels, polynomial_degree=0, sinusoid_degree=0, normalize_data=False):
        """Preprocess the training data and initialize the parameter matrix.

        Args:
            data: raw feature matrix, one sample per row.
            labels: (m*1) array of class labels (any comparable values).
            polynomial_degree: degree of polynomial feature expansion (0 = none).
            sinusoid_degree: degree of sinusoid feature expansion (0 = none).
            normalize_data: whether to standardize the features.
        """
        # Expand/normalize features; keep the normalization statistics.
        (self.data, self.features_mean, self.features_std) = prepare_for_training(data, polynomial_degree,
                                                                                  sinusoid_degree, normalize_data)
        self.labels = labels
        self.polynomial_degree = polynomial_degree
        self.sinusoid_degree = sinusoid_degree
        self.normalize_data = normalize_data
        # Distinct class labels, shaped (num_classes, 1).
        self.unique_labels = np.unique(self.labels).reshape(-1, 1)
        # One parameter row per class: theta has shape (num_classes, n).
        self.theta = np.zeros((self.unique_labels.shape[0], self.data.shape[1]))

    def train(self, learning_rate=0.1, num_iterations=1000):
        """Fit one binary classifier per class (one-vs-all).

        Note: `learning_rate` is kept for interface compatibility but is
        unused — scipy's CG optimizer chooses its own step sizes.

        Args:
            num_iterations: maximum optimizer iterations per class.

        Returns:
            List of per-class loss histories (one list of cost values per class).
        """
        loss_histories = []
        for type_index in range(self.unique_labels.shape[0]):
            # Current parameters for this class, as an (n*1) column.
            theta_type = self.theta[type_index, :].reshape(-1, 1)
            # Binary target: 1 where the sample belongs to this class, else 0 (m*1).
            label_type = (self.labels == self.unique_labels.flatten()[type_index]).astype(int).reshape(-1, 1)
            loss_history = self.gradient_descent(theta_type, num_iterations, label_type, type_index)
            loss_histories.append(loss_history)
        return loss_histories

    def gradient_descent(self, theta_type, num_iterations, label_type, type_index):
        """Optimize one class's parameters with conjugate gradient.

        Args:
            theta_type: initial parameters, (n*1) column vector.
            num_iterations: maximum optimizer iterations.
            label_type: (m*1) binary targets for this class.
            type_index: row of self.theta to write the result into.

        Returns:
            List of cost values recorded after each optimizer iteration.
        """
        loss_history = []
        result = minimize(
            lambda current_theta: self.cost_function(current_theta, self.data, label_type),  # objective
            theta_type.flatten(),  # initial theta (minimize works on flat vectors)
            method='CG',
            jac=lambda current_theta: self.gradient_step(current_theta, self.data, label_type),  # gradient
            options={'maxiter': num_iterations},
            # Record the cost after every iteration.
            callback=lambda current_theta: loss_history.append(
                self.cost_function(current_theta, self.data, label_type))
        )
        if not result.success:
            print(result.message)
        # Store the optimized parameters for this class.
        self.theta[type_index, :] = result.x.flatten()
        return loss_history

    def cost_function(self, theta_type, data, label_type):
        """Cross-entropy loss for one binary classifier.

        Args:
            theta_type: flat or (n*1) parameter vector.
            data: (m*n) feature matrix.
            label_type: (m*1) binary targets.

        Returns:
            Scalar mean cross-entropy cost.
        """
        predictions = self.predict(data, theta_type.reshape(-1, 1))
        # Clip so log() stays finite when the sigmoid saturates at 0 or 1;
        # otherwise a saturated prediction yields -inf/NaN and breaks CG.
        eps = 1e-15
        predictions = np.clip(predictions, eps, 1 - eps)
        positive = label_type == 1
        # Sum log-likelihood over positive and negative samples separately.
        y_in_set_cost = np.sum(np.log(predictions[positive]))
        y_not_in_set_cost = np.sum(np.log(1 - predictions[~positive]))
        return (-1 / data.shape[0]) * (y_in_set_cost + y_not_in_set_cost)

    def predict(self, data, theta):
        """Sigmoid probabilities: (m*n) data x (n*k) theta -> (m*k) scores."""
        return sigmoid(np.dot(data, theta))

    def gradient_step(self, theta_type, data, label_type):
        """Gradient of the cross-entropy loss w.r.t. theta.

        Args:
            theta_type: flat or (n*1) parameter vector.
            data: (m*n) feature matrix.
            label_type: (m*1) binary targets.

        Returns:
            Flattened gradient of length n (as minimize's jac expects).
        """
        theta_type = theta_type.reshape(-1, 1)
        predictions = self.predict(data, theta_type)
        gradient = np.dot(data.T, predictions - label_type) / data.shape[0]
        return gradient.flatten()

    def predict_label(self, data):
        """Predict the class label for each row of raw input data.

        Returns:
            List of predicted labels, one per input row.
        """
        # NOTE(review): this re-derives normalization statistics from the
        # prediction data instead of reusing features_mean/features_std from
        # training — confirm whether prepare_for_training can accept them.
        data = prepare_for_training(data, self.polynomial_degree, self.sinusoid_degree, self.normalize_data)[0]
        # (m*num_classes) probability matrix: data (m*n) x theta.T (n*num_classes).
        probabilities = self.predict(data, self.theta.T)
        predicted_labels = np.empty(probabilities.shape[0], dtype=object)
        # Column index of the highest probability per row.
        max_prob_index = np.argmax(probabilities, axis=1)
        # Map each winning column back to its label value.
        for index, label in enumerate(self.unique_labels.flatten()):
            predicted_labels[max_prob_index == index] = label
        return predicted_labels.tolist()
