import numpy as np
from matplotlib import pyplot
import tensorflow as tf

# Number of points drawn for each class.
sample_num = 1000
# Two 2-D Gaussian clouds with the same covariance: the negative class is
# centred at (0, 3), the positive class at (3, 0).
ne_samples = np.random.multivariate_normal(mean=[0, 3], cov=[[1, .5], [.5, 1]], size=sample_num)
po_samples = np.random.multivariate_normal(mean=[3, 0], cov=[[1, .5], [.5, 1]], size=sample_num)

# Stack both clouds into one (2 * sample_num, 2) float32 design matrix and
# build the matching 0/1 labels with shape (2 * sample_num, 1).
inputs = np.concatenate([ne_samples, po_samples], axis=0).astype(np.float32)
targets = np.concatenate(
    [np.zeros((sample_num, 1), dtype='float32'), np.ones((sample_num, 1), dtype='float32')],
    axis=0,
)

# Visual sanity check of the raw data, coloured by label.
pyplot.scatter(inputs[:, 0], inputs[:, 1], c=targets[:, 0])
# pyplot.show()

# Model dimensions: 2-D points mapped to a single scalar score.
input_dim = 2
output_dim = 1

# Trainable parameters of the affine model  score = x @ w + b.
w = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim, output_dim)))
b = tf.Variable(initial_value=tf.zeros(shape=(output_dim, )))


def model(x):
    """Return the affine prediction x @ w + b using the module-level w, b."""
    scores = tf.matmul(x, w)
    return scores + b


def square_loss(outputs, predictions):
    """Mean squared error between `outputs` (the targets) and `predictions`."""
    return tf.reduce_mean(tf.square(outputs - predictions))


learning_rate = .1  # step size for the manual gradient-descent update in train()


def train(x, outputs):
    """Run one full-batch gradient-descent step on (w, b).

    Args:
        x: input batch fed to `model`.
        outputs: target values matched against the model's predictions.

    Returns:
        The scalar loss computed *before* the parameter update.
    """
    with tf.GradientTape() as tape:
        predictions = model(x)
        loss = square_loss(outputs, predictions)
    # Compute gradients and apply updates OUTSIDE the tape context: calling
    # tape.gradient() on a non-persistent tape inside its own `with` block is
    # inefficient (TensorFlow emits a warning), and it needlessly records the
    # assign_sub update ops on the tape.
    grad_loss_w, grad_loss_b = tape.gradient(loss, [w, b])
    w.assign_sub(grad_loss_w * learning_rate)
    b.assign_sub(grad_loss_b * learning_rate)
    return loss


# Fit the model with 20 full-batch gradient steps, reporting the loss.
for epoch in range(20):
    loss = train(inputs, targets)
    print(f'loss at step {epoch}: {loss:.4f}')


# Decision boundary: points where the model output equals 0.5, i.e.
#   w0*x + w1*y + b = 0.5   =>   y = -(w0/w1)*x + (0.5 - b)/w1.
x = np.linspace(-1, 4, 100)
y = -(w[0] / w[1] * x) + (0.5 - b) / w[1]
pyplot.plot(x, y, '-r')

# Re-colour the points by the model's own classification (score > 0.5).
predictions = model(inputs)
pyplot.scatter(inputs[:, 0], inputs[:, 1], c=predictions[:, 0] > 0.5)
pyplot.show()
