from matplotlib import pyplot as plt
import numpy as np
import random


def print_Para(parameter):
    """Pretty-print the fitted linear model as an equation.

    `parameter` is a column vector: the first rows are the feature
    weights, the last row is the bias term.
    """
    # Header of the equation (kept verbatim; it is user-facing output).
    print("最终预测线性模型表达式为：\n y=", end=' ')
    # One "w xk+" term per weight row, 1-based feature index.
    for idx, row in enumerate(parameter[:-1], start=1):
        print(round(row[0], 4), f"x{idx}+", end=' ')
    # Trailing bias term closes the line.
    print(round(parameter[-1][0], 4))


class linear_regrassion():
    """Linear regression fitted by (mini-batch) gradient descent.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Input matrix, one row per sample.
    Y : ndarray, shape (n_samples,)
        Target values.
    learningRate : float
        Gradient-descent step size.
    epoch : int
        Number of passes over the data.
    show_plot : bool
        Animate the fit while training; only allowed when X has a
        single feature, otherwise an Exception is raised.
    """

    def __init__(self, X, Y, learningRate, epoch, show_plot):
        self.dataX = X  # raw input matrix (rows = samples, cols = features)
        self.dataY = Y
        self.learningRate = learningRate
        self.epoch = epoch
        self.show_plot = None  # stays falsy unless X is 1-D (see below)
        self.y = np.array([self.dataY]).T  # targets as a column vector
        self.loss_sequence = []  # loss recorded every `check_point` epochs
        self.check_point = 50  # default logging interval (epochs)
        # Append a column of ones so the last parameter acts as the bias.
        self.X = np.column_stack((X, np.repeat(1, X.shape[0])))
        # Initialise parameters with ones (random init would also work).
        self.parameter = np.array([np.ones(self.X.shape[1])]).T
        # Plotting is only possible with a single input dimension.
        if X.shape[1] == 1:
            self.show_plot = show_plot
        elif (X.shape[1] > 1) and show_plot:
            raise Exception("Unable to draw image with too many variables!")

    def show_Linear_Regression(self, predict_y, range_i, loss):
        """Periodically plot the current fit and log/record the loss.

        predict_y : current predictions for the full data set.
        range_i   : zero-based epoch index.
        loss      : 1x1 ndarray with the current (un-normalised) squared loss.
        """
        # Redraw the animation only every 20000 epochs to keep it cheap.
        if (range_i + 1) % 20000 == 0:
            if self.show_plot:
                plt.clf()
                plt.scatter(self.dataX, self.dataY, color='blue')
                plt.plot(self.dataX, predict_y, color='red', linewidth=2)
                plt.pause(0.03)
            np.set_printoptions(precision=4)
        # Log and record the loss every `check_point` epochs.
        if (range_i + 1) % self.check_point == 0:
            print("range", range_i + 1, "loss =", loss)
            self.loss_sequence.append(loss[0][0])

    def fit(self, batch_size=None, show_loss=False, check_point=None):
        """Train the model and return the fitted parameter column vector.

        batch_size  : if given (> 0), train with mini-batch gradient
                      descent; otherwise full-batch gradient descent.
        show_loss   : if True, plot the recorded loss curve afterwards.
        check_point : optional override for the loss-logging interval.

        Raises
        ------
        ValueError : if batch_size is given but not positive.
        """
        if check_point: self.check_point = check_point
        if batch_size:
            if batch_size <= 0: raise ValueError("batch size must larger than ZERO!")
            p = self.miniBatch_Linear_Regression(batch_size)
        else:
            p = self.Simple_Linear_Regression()
        if show_loss:
            self.show_loss()
        return p

    def predict(self, X):
        """Return predictions for X (rows = samples) as a column vector."""
        # Augment with the bias column, exactly as in training.
        return np.dot(np.column_stack((X, np.repeat(1, X.shape[0]))), self.parameter)

    def show_loss(self):
        """Plot the loss recorded at each checkpoint against its epoch.

        Bug fix: the x axis must list the epochs at which losses were
        actually recorded (cp, 2*cp, ...).  The old
        `range(0, epoch, check_point)` produced one extra entry whenever
        `epoch` was not a multiple of `check_point`, making plt.plot
        fail with a length mismatch, and its values were shifted by one
        checkpoint even when the lengths happened to agree.
        """
        recorded_epochs = list(range(self.check_point, self.epoch + 1, self.check_point))
        plt.plot(recorded_epochs, self.loss_sequence, label='Epoch & Loss')
        plt.xlabel('epoch')
        plt.ylabel('Loss')
        plt.legend(loc="best")
        plt.show()

    def Simple_Linear_Regression(self):
        """Fit with full-batch gradient descent; return final parameters."""
        for i in range(self.epoch):
            # Forward pass: y_hat = X @ w
            predict_y = np.dot(self.X, self.parameter)
            # Residuals y_hat - y
            error_y = predict_y - self.y
            # Squared-error loss (1x1 matrix)
            cost = np.dot(error_y.T, error_y)
            # Gradient of 0.5 * mean squared error w.r.t. the parameters
            derivative = 0.5 * np.dot(error_y.T, self.X) / self.X.shape[0]
            # Gradient-descent update (in place)
            self.parameter -= self.learningRate * derivative.T
            # Periodic plotting / loss logging
            self.show_Linear_Regression(predict_y, i, cost)
        if self.show_plot: plt.show()
        return self.parameter

    def miniBatch_Linear_Regression(self, batch_size):
        """Fit with mini-batch gradient descent; return final parameters.

        Each epoch shuffles the sample indices, walks through them in
        chunks of `batch_size`, and accumulates the per-batch squared
        losses so the reported loss is comparable to the full-batch one.
        """
        n_samples = self.X.shape[0]
        # Idiomatic ceiling division replaces the old conditional formula.
        batch_num = (n_samples + batch_size - 1) // batch_size
        for i in range(self.epoch):
            # Fresh random permutation of the sample indices each epoch.
            shuffled_idx = random.sample(range(0, n_samples), n_samples)
            mini_loss = 0.
            for batch in range(0, batch_num):
                index = shuffled_idx[batch * batch_size:(batch + 1) * batch_size]
                mini_X = self.X[index]
                mini_y = self.y[index]
                mini_predict_y = np.dot(mini_X, self.parameter)
                mini_error_y = mini_predict_y - mini_y
                # Accumulate the squared loss over all batches of this epoch.
                mini_loss += np.dot(mini_error_y.T, mini_error_y)
                derivative = 0.5 * np.dot(mini_error_y.T, mini_X) / mini_X.shape[0]
                self.parameter -= self.learningRate * derivative.T
            # Predictions over the full data set, for plotting only.
            predict_y = np.dot(self.X, self.parameter)
            self.show_Linear_Regression(predict_y, i, mini_loss)
        if self.show_plot: plt.show()
        return self.parameter
