# -*- coding: utf-8 -*-
"""
author:LTH
data:
"""

from random import random

import numpy as np

'''
This implements univariate linear regression. To turn it into multivariate
linear regression, bring in NumPy's vectorized operations and compute in
batches for speed.

1 -> prepare the data
2 -> initialize the parameters
3 -> train and update the parameters
4 -> output the result

The most important step is choosing the loss function.
'''


class LR:
    """Univariate linear regression trained with full-batch gradient descent.

    Loads two-column CSV data (x, y), initializes slope and intercept at
    random, then runs a fixed number of gradient-descent steps, printing the
    mean-squared-error loss at each iteration. Training happens in
    ``__init__``; afterwards ``self.w`` and ``self.b`` hold the fitted line.
    """

    def __init__(self, data_path="data.txt", lr=0.000001, iterations=100):
        """Load the data and immediately train the model.

        Args:
            data_path: CSV file whose rows are "x,y" pairs.
            lr: learning rate for gradient descent.
            iterations: number of full-batch update steps.
        """
        # Each row: column 0 is the input x, column 1 is the target y.
        self.data = np.genfromtxt(data_path, delimiter=",")
        self.lr = lr

        # Random initialization of intercept (b) and slope (w).
        self.b = random()
        self.w = random()

        # Train: one forward pass for the loss, one backward pass per step.
        for i in range(iterations):
            loss = self._forward()
            print("iteration " + str(i) + "---> " + str(loss))
            self._backward()
        print(self._forward())

    def _forward(self):
        """Return the mean squared error of the current (w, b) over the data."""
        totalError = 0
        for i in range(len(self.data)):
            x = self.data[i, 0]
            y = self.data[i, 1]
            totalError += (y - (self.w * x + self.b)) ** 2
        return totalError / float(len(self.data))

    def _backward(self):
        """Take one gradient-descent step on (w, b) using the full batch."""
        b_gradient = 0
        w_gradient = 0
        N = float(len(self.data))
        for i in range(len(self.data)):
            x = self.data[i, 0]
            y = self.data[i, 1]
            # d/db and d/dw of (w*x + b - y)^2, accumulated over all rows.
            b_gradient += 2 * ((self.w * x) + self.b - y)
            w_gradient += 2 * x * ((self.w * x) + self.b - y)
        # Average the gradients, then step against them.
        b_gradient /= N
        w_gradient /= N
        self.b -= self.lr * b_gradient
        self.w -= self.lr * w_gradient


class LR_mult:
    """Multivariate linear regression (3 features) via full-batch gradient descent.

    The data matrix has 4 columns: columns 0-2 are the features, column 3 is
    the target. By default a random synthetic data set is generated, so the
    loss can only decrease toward the (nonzero) noise floor of that data.
    """

    def __init__(self, data=None, lr=0.0001, iterations=100):
        """Prepare the data, initialize the parameters, and train immediately.

        Args:
            data: optional (N, 4) array-like; defaults to random synthetic data.
            lr: learning rate for gradient descent.
            iterations: number of full-batch update steps.
        """
        # self.data=np.genfromtxt("data.txt",delimiter=",")
        self.data = np.random.randn(100, 4) if data is None else np.asarray(data, dtype=float)
        self.lr = lr

        # w has shape (1, 3), b has shape (1,).
        self.w = np.random.randn(1, 3)
        self.b = np.random.randn(1)

        # Train: one forward pass for the loss, one backward pass per step.
        for i in range(iterations):
            loss = self._forward()
            print("iteration " + str(i) + "---> " + str(loss))
            self._backward()
        print(self._forward())

    def _forward(self):
        """Return the mean squared error of the current (w, b) over the data."""
        totalError = 0.0
        for i in range(len(self.data)):
            x = self.data[i, :3]
            y = self.data[i, -1]
            # Bug fix: the prediction is w.x + b, so b belongs inside the
            # prediction (the original added b to the residual instead,
            # flipping its sign in the loss).
            totalError += (y - (self.w.dot(x).sum() + float(self.b))) ** 2
        return totalError / float(len(self.data))

    def _backward(self):
        """Take one full-batch gradient-descent step on (w, b).

        Bug fixes relative to the original:
        * gradients accumulate into a scalar / (3,) vector instead of
          (100, 1) / (100, 3) arrays, whose in-place broadcast `+=` with a
          (1, 3) operand raised ValueError on the first step;
        * the residual uses the dot product w.x (the model's prediction),
          matching ``_forward``, rather than the elementwise product w * x.
        """
        b_gradient = 0.0
        w_gradient = np.zeros(3)
        N = float(len(self.data))
        for i in range(len(self.data)):
            x = self.data[i, :3]
            y = self.data[i, -1]
            # d/db and d/dw of (w.x + b - y)^2, accumulated over all rows.
            residual = self.w.dot(x).sum() + float(self.b) - y
            b_gradient += 2 * residual
            w_gradient += 2 * x * residual
        # Average the gradients, then step against them.
        self.b = self.b - self.lr * (b_gradient / N)
        self.w = self.w - self.lr * (w_gradient / N)


if __name__ == "__main__":
    # Train only when run as a script; importing this module should not
    # kick off a full training loop as a side effect.
    model = LR_mult()
