import numpy as np
import torch
import matplotlib.pyplot as plt

# Two 2-D Gaussian clusters, 20 points each, with a class-label column
# appended (cluster A -> label 0, cluster B -> label 1).
cluster_a = torch.normal(1.5, 0.3, (20, 2))
cluster_a = torch.cat((cluster_a, torch.zeros((20, 1))), dim=1)
cluster_b = torch.normal(0.5, 0.3, (20, 2))
cluster_b = torch.cat((cluster_b, torch.ones((20, 1))), dim=1)

# Stack both clusters and shuffle the rows so training order is random.
pts = torch.cat((cluster_a, cluster_b), dim=0)
pts = pts[torch.randperm(pts.shape[0])]

# Trainable parameters and the feature/label split of the shuffled data.
w = torch.tensor([0.1, 0.1], requires_grad=True)
b = torch.tensor([0.2, 0.2], requires_grad=True)
x = pts[:, :2]  # features: columns 0-1
y = pts[:, 2]   # labels: column 2 (0.0 or 1.0)

num_classes = 2


def one_hot(label):
    """Return a one-hot float vector of length ``num_classes``.

    ``label`` may be an int, a float, a 0-dim tensor, or a numpy scalar;
    it is cast to ``int`` before indexing, because indexing a tensor with
    a float/numpy scalar raises an IndexError.
    """
    vec = torch.zeros((num_classes,))
    vec[int(label)] = 1
    return vec


def softmax(x):
    """Numerically stable softmax over all elements of ``x``.

    Subtracting the maximum before exponentiating leaves the result
    unchanged mathematically but prevents ``exp`` from overflowing to
    ``inf`` (and the quotient from becoming NaN) for large logits.
    """
    shifted = x - torch.max(x)
    exps = torch.exp(shifted)
    return exps / torch.sum(exps)


epochs = 100
lr = 0.01  # SGD step size

# Per-sample SGD minimizing cross-entropy between the softmax output and
# the one-hot label.
# NOTE(review): `item * w + b` is an element-wise product (w is a 2-vector),
# not the matrix multiply of a standard linear classifier — presumably a
# deliberate simplification for this toy demo; confirm. A full linear layer
# would use a (2, 2) weight matrix and `item @ w + b`.
cross_loss = None
for epoch in range(epochs):
    for i, item in enumerate(x):
        linear_output = item * w + b
        sft_out = softmax(linear_output)
        # Cast the float label to int before one-hot encoding: indexing a
        # tensor with a float/numpy scalar raises an IndexError.
        label = one_hot(int(y[i]))
        cross_loss = -torch.sum(label * torch.log(sft_out))
        cross_loss.backward()
        # Update outside autograd, then clear grads so they don't accumulate.
        with torch.no_grad():
            w -= w.grad * lr
            b -= b.grad * lr
            w.grad.zero_()
            b.grad.zero_()
    if epoch % 10 == 0:
        # Reports the loss of the LAST sample of the epoch, not an average.
        print(f"loss:{cross_loss.item():.4f}")

# Inference pass: predicted class = argmax of the softmax output per sample.
# w and b still require grad, so disable autograd to avoid needlessly
# building a computation graph for every prediction.
y_predict = []
with torch.no_grad():
    for i, item in enumerate(x):
        linear_output = item * w + b
        sft_out = softmax(linear_output)
        y_predict.append(torch.argmax(sft_out).view(-1, 1))
y_predict = torch.concatenate(y_predict).reshape(-1)

print(y)
print(y_predict)
