# https://zhuanlan.zhihu.com/p/419092667
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt

torch.manual_seed(40)  # fixed seed so the synthetic data is reproducible

# Two gaussian clusters: a wide one around (1, 0) and a tight one around (5, -7).
# NOTE: the order of the randn() calls is part of the reproducible RNG sequence.
center1 = torch.tensor([1, 0])
center2 = torch.tensor([5, -7])
y1 = torch.randn(1000, 2) + torch.randn(1000, 2) * 0.1 + center1
y2 = torch.randn(1000, 2) + torch.randn(1000, 2) * 0.01 + center2
y = torch.cat([y1, y2])  # (2000, 2) combined dataset

print(y[:10])


class regression(nn.Module):
    """Fit a line to 2-D points via an epsilon-insensitive distance loss.

    The learnable direction ``w`` is renormalized to unit length inside
    ``forward``, so ``w . x + b`` is the signed distance of each point from
    the line ``w . x + b = 0``. Points whose distance magnitude is within
    ``eps`` of the line contribute nothing to the loss.
    """

    def __init__(self, eps):
        """eps: half-width of the zero-loss margin around the line."""
        super(regression, self).__init__()
        # Unit length is enforced at forward time, not here.
        self.w = nn.Parameter(torch.ones(2))
        self.b = nn.Parameter(torch.zeros(1))
        self.eps = eps

    def forward(self, x):
        """Return the summed out-of-margin distance of the points in x."""
        unit_w = self.w / self.w.norm()
        dist = torch.matmul(x, unit_w) + self.b
        # Positive distances beyond +eps count as-is; negative distances
        # beyond -eps are subtracted, i.e. their magnitude is added.
        above = dist[dist > self.eps].sum()
        below = dist[dist < -self.eps].sum()
        return above - below


model = regression(1)
opt = optim.SGD(model.parameters(), lr=0.001)

# Train: 500 plain SGD steps minimizing the out-of-margin distance sum.
for step in range(500):
    opt.zero_grad()
    loss = model(y)
    loss.backward()
    print(loss)  # monitor convergence each step
    opt.step()

# plt.scatter(y[:, 0], y[:, 1], s=10)
# x = torch.arange(-2, 8, 0.1)
# w = model.w / model.w.norm()
# yy = -(w[0] * x + model.b) / w[1]

# plt.plot(x, yy.data)

# plt.show()
