import numpy as np
import pandas as pd


class LRModel(object):
    """Toy linear-regression model.

    Synthesizes a noise-free labeled dataset from known ground-truth
    weights, then recovers those weights with full-batch gradient descent.
    """

    def __init__(self, init_weight, sample_count, fit_intercept=True):
        """
        Args:
            init_weight: ground-truth coefficients used to generate the data.
                When ``fit_intercept`` is True the last entry is the intercept.
            sample_count: number of synthetic samples to generate.
            fit_intercept: whether the model includes a bias term.
        """
        self.init_weight = init_weight
        self.sample_count = sample_count
        self.fit_intercept = fit_intercept

    def generate_csv(self):
        """Build the synthetic training set.

        Returns:
            pd.DataFrame with random integer feature columns x0..x{k-1}
            (values in [0, 40)) and a target column 'y' computed exactly
            from ``init_weight`` (no noise).
        """
        # bool counts as 1 here: when fitting an intercept, the last
        # weight is the bias, not a feature coefficient.
        feature_count = len(self.init_weight) - self.fit_intercept

        df = pd.DataFrame()
        for i in range(feature_count):
            df[f'x{i}'] = np.random.randint(0, 40, self.sample_count)

        weights = np.asarray(self.init_weight)
        if self.fit_intercept:
            df['y'] = df.values @ weights[:-1] + weights[-1]
        else:
            df['y'] = df.values @ weights
        return df

    def train(self, epochs=100000, learning_rate=0.001):
        """Fit weights to the synthetic data by full-batch gradient descent.

        Args:
            epochs: number of gradient-descent iterations.
            learning_rate: step size for each update.

        Returns:
            np.ndarray of learned weights; when ``fit_intercept`` is True
            the intercept is the last element.
        """
        pd_data = self.generate_csv()
        y_train = pd_data['y'].values
        X_train = pd_data.drop(['y'], axis=1).values

        # The design matrix is loop-invariant: append the bias column of
        # ones once, not on every epoch.
        if self.fit_intercept:
            x_val = np.c_[X_train, np.ones(self.sample_count)]
        else:
            x_val = X_train

        # Start from a float array so updates are never truncated; the
        # original integer init only worked because the first subtraction
        # upcast it.
        w = np.full(len(self.init_weight), 100.0)

        for e in range(epochs):
            if e % 1000 == 0:
                print(f'epoch={e}, w={w}')
            pred = x_val @ w
            delta_y = pred - y_train
            # MSE gradient, vectorized: X^T (pred - y) / n replaces the
            # per-sample Python loop and list append.
            grad = x_val.T @ delta_y / self.sample_count
            w = w - learning_rate * grad
        return w


if __name__ == '__main__':
    # Demo: three feature weights plus an intercept of 328.
    model = LRModel(init_weight=[12, 3, -4, 328],
                    sample_count=1000,
                    fit_intercept=True)
    model.train()
