import numpy as np
from  utils.features import prepare_for_training

class LinearRegression:
    """Linear regression trained with batch gradient descent.

    Features are expected to already include the bias column; `theta` is a
    column vector of shape (num_features, 1).
    """

    def __init__(self, feature, labels, features_mean, features_deviation,
                 theta: np.ndarray, learn_rate: float = 0.01,
                 polynomial_degree=0, sinusoid_degree=0, is_normalize=True):
        """
        :param feature: training features, shape (num_examples, num_features)
        :param labels: training targets, shape (num_examples, 1)
        :param features_mean: per-feature means (from preprocessing)
        :param features_deviation: per-feature standard deviations
        :param theta: initial parameter column vector, shape (num_features, 1)
        :param learn_rate: gradient-descent step size
        :param polynomial_degree: degree for polynomial feature expansion
        :param sinusoid_degree: degree for sinusoidal feature expansion
        :param is_normalize: whether preprocessing normalizes features
        """
        self.feature = feature
        self.labels = labels
        # Feature means (kept so new data can be normalized consistently)
        self.features_mean = features_mean
        # Feature standard deviations
        self.features_deviation = features_deviation
        # Parameter column vector
        self.theta = theta
        # Learning rate
        self.learn_rate = learn_rate
        # Polynomial feature-transform degree
        self.polynomial_degree = polynomial_degree
        # Sinusoidal feature-transform degree
        self.sinusoid_degree = sinusoid_degree
        # Normalization flag passed to prepare_for_training
        self.is_normalize = is_normalize

    def train(self, num_iters):
        """Run gradient descent for `num_iters` iterations.

        :return: (theta, cost_history) — the trained parameters and the cost
                 recorded after every iteration.
        """
        cost_history = self.gradient_descent(num_iters)
        return self.theta, cost_history

    def gradient_descent(self, num_iters):
        """Iterate `num_iters` gradient steps, recording the cost after each.

        :return: list of cost values, one per iteration.
        """
        cost_history = []
        for _ in range(num_iters):
            self.gradient_step()
            cost_history.append(self.cost_function())
        return cost_history

    def gradient_step(self):
        """One batch gradient-descent update of `theta` (matrix form).

        `delta` is the column vector of residuals; `delta.T` is its row-vector
        view, so a single `np.dot(delta.T, X)` updates every theta at once:
        theta <- theta - (alpha / m) * X.T @ delta
        """
        prediction = LinearRegression.hypothesis(self.feature, self.theta)
        # Residuals, shape (num_examples, 1)
        delta = prediction - self.labels
        num_examples: int = self.feature.shape[0]
        # Vectorized simultaneous update of theta_0..theta_n
        self.theta = self.theta - (self.learn_rate * (1 / num_examples)) * (np.dot(delta.T, self.feature)).T

    def cost_function(self, data=None, labels=None):
        """Mean squared error cost: (1 / 2m) * sum of squared residuals.

        `data`/`labels` default to the training set, which keeps the original
        no-argument call sites working. Making them parameters fixes the bug
        where `get_cost` passed two arguments to a zero-argument method.

        :return: scalar cost value.
        """
        if data is None:
            data = self.feature
        if labels is None:
            labels = self.labels
        num_examples = data.shape[0]
        delta = LinearRegression.hypothesis(data, self.theta) - labels
        # delta.T @ delta is the (1, 1) matrix of the squared-residual sum
        cost = (1 / 2) * np.dot(delta.T, delta) / num_examples
        return cost[0][0]

    @staticmethod
    def hypothesis(data: np.ndarray, theta):
        """
        :param data: features, shape (num_examples, num_features)
        :param theta: parameter column vector
        :return: predictions X @ theta, shape (num_examples, 1)
        """
        predictions = np.dot(data, theta)
        return predictions

    def get_cost(self, data, labels):
        """Preprocess raw `data` and evaluate the cost against `labels`."""
        data_processed = prepare_for_training(data,
                                              self.polynomial_degree,
                                              self.sinusoid_degree,
                                              self.is_normalize
                                              )[0]
        return self.cost_function(data_processed, labels)

    def predict(self, data):
        """Predict regression values for raw `data` with the trained model.

        Bug fix: the original returned the preprocessed features themselves
        instead of applying the hypothesis to them.
        """
        data_processed = prepare_for_training(data,
                                              self.polynomial_degree,
                                              self.sinusoid_degree,
                                              self.is_normalize
                                              )[0]
        return LinearRegression.hypothesis(data_processed, self.theta)