"""
从零开始实现线性回归，使用Huber损失函数
"""
import random
import torch
from d2l import torch as d2l


class Linreg():
    """Linear regression trained from scratch with an adaptive Huber loss."""

    def __init__(self):
        # Training hyperparameters.
        self.lr = 0.03          # learning rate
        self.num_epochs = 3     # full passes over the training set
        self.batch_size = 10    # minibatch size
        self.net = self.linreg
        self.loss = self.huber_loss

    def synthetic_data(self, w, b, num_examples):
        """Generate y = Xw + b + Gaussian noise.

        Returns (X, y) with X of shape (num_examples, len(w)) and y of
        shape (num_examples, 1).
        """
        X = torch.normal(0, 1, (num_examples, len(w)))
        y = torch.matmul(X, w) + b
        y += torch.normal(0, 0.01, y.shape)  # small observation noise
        return X, y.reshape((-1, 1))

    def data_iter(self, batch_size, features, labels):
        """Yield (features, labels) minibatches in random order."""
        num_examples = len(features)
        indices = list(range(num_examples))
        # Samples are read in a random order, with no particular structure.
        random.shuffle(indices)
        for i in range(0, num_examples, batch_size):
            # Last batch may be smaller than batch_size.
            batch_indices = torch.tensor(
                indices[i: min(i + batch_size, num_examples)])
            yield features[batch_indices], labels[batch_indices]

    def linreg(self, X, w, b):
        """Linear regression model: X @ w + b."""
        return torch.matmul(X, w) + b

    def MAE(self, y, y_predicted):
        """Mean absolute error between y and y_predicted."""
        error = y_predicted - y
        absolute_error = torch.absolute(error)
        total_absolute_error = torch.sum(absolute_error)
        mae = total_absolute_error / y.shape[0]
        return mae

    def huber_loss(self, y_hat, y):
        """Summed Huber loss with an adaptive threshold.

        delta is set to 1.35 * MAE of the current predictions. It is a
        threshold hyperparameter, so it is detached from the autograd
        graph; otherwise gradients would flow through delta itself and
        distort the standard Huber gradient.
        """
        delta = (1.35 * self.MAE(y, y_hat)).detach()
        residual = torch.abs(y - y_hat)
        huber_loss = torch.where(residual < delta,
                                 0.5 * ((y - y_hat) ** 2),
                                 delta * residual - 0.5 * (delta ** 2))
        return torch.sum(huber_loss)

    def sgd(self, params, lr, batch_size):
        """Minibatch stochastic gradient descent (in-place update)."""
        with torch.no_grad():
            for param in params:
                # Loss is summed over the batch, so normalize by batch_size.
                param -= lr * param.grad / batch_size
                param.grad.zero_()

    def main(self):
        """Train on synthetic data and report parameter-recovery error."""
        true_w = torch.tensor([2, -3.4])
        true_b = 4.2
        features, labels = self.synthetic_data(true_w, true_b, 1000)
        w = torch.normal(0, 0.01, size=(2, 1), requires_grad=True)
        b = torch.zeros(1, requires_grad=True)

        for epoch in range(self.num_epochs):
            for X, y in self.data_iter(self.batch_size, features, labels):
                # huber_loss already returns a scalar sum over the
                # minibatch, so it can be back-propagated directly to get
                # the gradients w.r.t. [w, b].
                l = self.loss(self.net(X, w, b), y)
                l.backward()
                self.sgd([w, b], self.lr, self.batch_size)  # update params
            with torch.no_grad():
                train_l = self.loss(self.net(features, w, b), labels)
                print(f'epoch {epoch + 1}, loss {float(train_l.mean()):f}')

        print(f'w的估计误差: {true_w - w.reshape(true_w.shape)}')
        print(f'b的估计误差: {true_b - b}')


if __name__ == '__main__':
    # Script entry point: build the model and run the training loop.
    Linreg().main()
