# Toolkit
import numpy as np
# Package providing data-preprocessing helpers
from utils.features import prepare_for_training


class LinearRegression:
    """Linear regression trained with batch gradient descent."""

    def __init__(self, data, labels, polynomial_degree=0, sinusoid_degree=0, normalize_data=True):
        """
        1. Preprocess the input data.
        2. Determine the number of features.
        3. Initialize the parameter matrix theta.

        :param data: raw input data (x values), one sample per row
        :param labels: targets (y values), expected as a column vector
        :param polynomial_degree: degree of polynomial feature expansion
        :param sinusoid_degree: degree of sinusoid feature expansion
        :param normalize_data: whether to normalize the features
        """

        # Preprocess the data.
        # data_processed: transformed data; features_mean: per-feature mean;
        # features_deviation: per-feature standard deviation.
        # BUG FIX: normalize_data is now forwarded. Previously it was stored but
        # silently ignored here, while get_cost()/predict() DID forward it —
        # so training and inference could preprocess data inconsistently.
        data_processed, features_mean, features_deviation = prepare_for_training(
            data, polynomial_degree, sinusoid_degree, normalize_data)

        self.data = data_processed      # replace raw data with the processed version
        self.labels = labels
        self.features_mean = features_mean
        self.features_deviation = features_deviation
        self.polynomial_degree = polynomial_degree
        self.sinusoid_degree = sinusoid_degree
        self.normalize_data = normalize_data

        num_features = self.data.shape[1]           # number of columns = number of features
        self.theta = np.zeros((num_features, 1))    # initialize weights theta as a column vector

    def train(self, alpha, num_iterations=500):
        """
        Training entry point: run gradient descent.

        :param alpha: learning rate
        :param num_iterations: number of gradient-descent iterations
        :return: (trained theta, list of cost values per iteration)
        """
        cost_history = self.gradient_descent(alpha, num_iterations)
        return self.theta, cost_history

    def gradient_descent(self, alpha, num_iterations):
        """
        Iterate num_iterations gradient-descent steps, recording the cost
        on the training set after each step.

        :param alpha: learning rate
        :param num_iterations: number of iterations
        :return: list of cost values (one per iteration)
        """
        cost_history = []
        for _ in range(num_iterations):
            self.gradient_step(alpha)
            cost_history.append(self.cost_function(self.data, self.labels))
        return cost_history

    def gradient_step(self, alpha):
        """
        One batch-gradient-descent parameter update (vectorized):
        theta <- theta - alpha/m * X^T (X theta - y)

        :param alpha: learning rate
        """
        num_examples = self.data.shape[0]       # number of samples m
        prediction = LinearRegression.hypothesis(self.data, self.theta)
        delta = prediction - self.labels        # residuals (sign does not matter, squared later)
        theta = self.theta
        theta = theta - alpha * (1 / num_examples) * (np.dot(delta.T, self.data)).T
        self.theta = theta

    # A @staticmethod is associated with the class but receives no instance
    # (no `self`); it can be called directly on the class.
    @staticmethod
    def hypothesis(data, theta):
        """
        Compute predictions X theta.

        :param data: feature matrix, one sample per row
        :param theta: parameter column vector
        :return: prediction column vector
        """
        predictions = np.dot(data, theta)
        return predictions

    def cost_function(self, data, labels):
        """
        Mean-squared-error cost: J = 1/(2m) * sum((X theta - y)^2).

        :param data: feature matrix to evaluate on
        :param labels: corresponding targets (column vector)
        :return: scalar cost value
        """
        num_examples = data.shape[0]
        # BUG FIX: hypothesis is now computed on the `data` argument. It was
        # previously computed on self.data, so get_cost() compared training-set
        # predictions against externally supplied labels — a wrong (and
        # potentially shape-mismatched) result for any non-training data.
        delta = LinearRegression.hypothesis(data, self.theta) - labels
        cost = (1 / 2) * np.dot(delta.T, delta) / num_examples
        return cost[0][0]

    # Convenience helper (not part of core regression), for demonstration.
    def get_cost(self, data, labels):
        """
        Preprocess `data` the same way as training data, then return the cost.

        :param data: raw (unprocessed) input data
        :param labels: corresponding targets
        :return: scalar cost value
        """
        data_processed = (
            prepare_for_training(data, self.polynomial_degree, self.sinusoid_degree, self.normalize_data)
        )[0]
        return self.cost_function(data_processed, labels)

    # Convenience helper (not part of core regression), for demonstration.
    def predict(self, data):
        """
        Predict regression values for raw input using the trained parameters.

        :param data: raw (unprocessed) input data
        :return: prediction column vector
        """
        data_processed = (
            prepare_for_training(data, self.polynomial_degree, self.sinusoid_degree, self.normalize_data)
        )[0]
        predictions = LinearRegression.hypothesis(data_processed, self.theta)
        return predictions
