"""
逻辑回归
"""
import numpy as np
# scipy's minimize performs the gradient-based optimization
from scipy.optimize import minimize
from features import prepare_for_training


class LogisticRegression:

    def __init__(self, data, labels, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):
        """
        1. 对数据进行预处理操作
        2. 先得到所有的特征个数
        3. 初始化参数矩阵 θ
        :param data: 原始数据，x值
        :param labels: 标签，y值
        """

        # 对数据进行预处理
        # data_processed 处理后数据，features_mean 数据的平均值u，features_deviation 数据标准差
        # normalize_data=False 是否标准化
        data_processed, features_mean, features_deviation = prepare_for_training(
            data, polynomial_degree, sinusoid_degree, normalize_data)

        self.data = data_processed      # 更换成处理后的数据
        self.labels = labels
        self.unique_labels = np.unique(labels)      # 标签的类别
        self.features_mean = features_mean
        self.features_deviation = features_deviation
        self.polynomial_degree = polynomial_degree
        self.sinusoid_degree = sinusoid_degree
        self.normalize_data = normalize_data

        self.num_feature = self.data.shape[1]            # 列数，即特征个数
        self.num_unique_label = self.unique_labels.shape[0]     # 有多少类别
        self.theta = np.zeros((self.num_feature, self.num_unique_label))    # 每种类别所对应的权重

    def train(self, max_iter=1000):
        loss_histories = []
        # 计算每个类别
        for label_index, unique_label in enumerate(self.unique_labels):
            current_initial_theta = np.copy(self.theta[:, label_index])     # 当前初始 θ
            current_labels = (self.labels == unique_label).astype(int)    # 当前类标签 (0, 1)
            # 计算该类 标签的 θ 值 和 损失函数 J(θ) 值
            current_theta, loss_history = LogisticRegression.gradient_descent(
                self.data, current_labels, current_initial_theta, max_iter)
            self.theta[:, label_index] = current_theta
            loss_histories.append(loss_history)
        return self.theta, loss_histories

    @staticmethod
    def gradient_descent(data, labels, theta, max_iter):
        loss_history = []
        result = minimize(
            # 要优化的目标（损失函数）
            lambda current_theta: LogisticRegression.loss_function(data, labels, current_theta),
            # 初始化的 θ 值，选择优化策略
            theta, method='CG',
            # 梯度下降迭代计算公式
            jac=lambda current_theta: LogisticRegression.gradient_step(data, labels, current_theta),
            # 迭代次数
            options={'maxiter': max_iter},
            # 记录结果
            callback=lambda current_theta: loss_history.append(
                LogisticRegression.loss_function(data, labels, current_theta)),
        )
        if not result.success:
            raise ArithmeticError('Can not minimize loss function' + result.message)
        optimized_theta = result.x
        return optimized_theta, loss_history

    @staticmethod
    def loss_function(data, labels, theta):
        """
        对数似然 l(θ) = y_is + y_not_is
        损失函数 J(θ) = - (1 / m) * l(θ)
        """
        num_examples = data.shape[0]
        predictions = LogisticRegression.hypothesis(data, theta).reshape(num_examples, 1)
        y_is = labels[labels == 1] @ np.log(predictions[labels == 1])
        y_not_is = (1 - labels[labels == 0]) @ np.log(1 - predictions[labels == 0])
        loss = - (1 / num_examples) * (y_is + y_not_is)
        return loss

    @staticmethod
    def hypothesis(data, theta):
        predictions = 1 / (1 + np.exp(- data @ theta))
        return predictions

    @staticmethod
    def gradient_step(data, labels, theta):
        num_examples = data.shape[0]
        predictions = LogisticRegression.hypothesis(data, theta).reshape(num_examples, 1)
        gradients = (1 / num_examples) * (predictions - labels).T @ data
        # flatten() 降维函数，用于将数组一维展开
        return gradients.flatten()

    def predict(self, data):
        data_processed = prepare_for_training(
            data, self.polynomial_degree, self.sinusoid_degree, self.normalize_data)[0]
        odds = LogisticRegression.hypothesis(data_processed, self.theta)
        # 比较每一行的元素，返回每行最大值的列索引，keepdims 保留原格式
        max_odds_index = np.argmax(odds, axis=1, keepdims=True)
        # 创建一个与数组 max_odds_index 形状相同、数据类型为 Python 对象的空数组
        class_prediction = np.empty(max_odds_index.shape, dtype=object)
        for index, label in enumerate(self.unique_labels):
            class_prediction[max_odds_index == index] = label
        return class_prediction
