# Linear classifier for points in the 2-D plane.

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# Two clusters of points drawn from 2-D multivariate normal distributions,
# one per class: same covariance, different means, so they are (mostly)
# linearly separable.
num_samples_per_class = 1000
samples1 = np.random.multivariate_normal(mean=[0, 3], cov=[[1, 0.5], [0.5, 1]], size=num_samples_per_class)
samples2 = np.random.multivariate_normal(mean=[3, 0], cov=[[1, 0.5], [0.5, 1]], size=num_samples_per_class)
# print(samples1.shape)
# print(samples2.shape)
# Model inputs: both clusters stacked into one (2000, 2) float32 array.
inputs = np.vstack((samples1, samples2)).astype(dtype=np.float32)
# Targets: label 0 for the first cluster, label 1 for the second, shape (2000, 1).
target_zeros = np.zeros((num_samples_per_class, 1), dtype=np.float32)
target_ones = np.ones((num_samples_per_class, 1), dtype=np.float32)
targets = np.vstack((target_zeros, target_ones))
# print(targets.shape)
# Scatter plot of the raw data, colored by true class label.
plt.scatter(inputs[:, 0], inputs[:, 1], c=targets)
# plt.show()

# Trainable parameters: weight matrix w (input_dim x output_dim) and bias b.
input_dim = 2
output_dim = 1
w = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim, output_dim)))
b = tf.Variable(initial_value=tf.zeros(shape=(output_dim,)))

# print(w)
# print(b)

# The model: a single affine transform.
# prediction = input * w + b
#            = (x, y) * (w1, w2) + b
#            = w1*x + w2*y + b
# Solving for y at a fixed prediction value gives the decision line:
# w2*y = -w1*x + prediction - b
# y = -(w1/w2)*x + (prediction - b)/w2
def model():
    """Return predictions for the global `inputs` using weights `w` and bias `b`."""
    return tf.matmul(inputs, w) + b


# Number of gradient-descent steps to run.
train_times = 40
# Step size for the gradient update.
learning_rate = 0.1


def train():
    """Run the optimization loop: `train_times` calls to train_once()."""
    i = 0
    while i < train_times:
        print(f'train: {i} time')
        train_once()
        i += 1


# Perform one gradient-descent step on w and b.
def train_once():
    """Do one forward pass, print the loss, and update `w` and `b` in place."""
    # The GradientTape context records the tensor ops executed inside it;
    # only the forward pass (how w and b produce the loss) needs recording.
    with tf.GradientTape() as tape:
        predictions = model()
        mean_loss = square_loss(predictions)
    print(mean_loss)
    # Differentiate the loss w.r.t. the weights. This is done OUTSIDE the
    # `with` block so the gradient computation and the variable updates are
    # not themselves recorded on the tape (the original recorded them
    # needlessly, wasting memory).
    gradient_w, gradient_b = tape.gradient(mean_loss, [w, b])
    # Vanilla gradient descent: step against the gradient.
    w.assign_sub(gradient_w * learning_rate)
    b.assign_sub(gradient_b * learning_rate)


# Loss function: mean squared error against the global `targets`.
def square_loss(predictions):
    """Return the mean of the element-wise squared errors for `predictions`."""
    squared_errors = tf.square(targets - predictions)
    return tf.reduce_mean(squared_errors)


# Run the training loop.
train()

# Classify: prediction > 0.5 -> class 1, prediction < 0.5 -> class 0.
# Evaluate the trained model's accuracy on the training data.
last_predictions = model()
# print(last_predictions)

# Vectorized thresholding: one numpy comparison over the whole prediction
# tensor replaces the original Python-level loop over individual rows.
# Result is a 1-D (2000,) array of 0/1 integer labels, same as before.
zero_or_ones = (last_predictions.numpy().flatten() > 0.5).astype(int)
# print(zero_or_ones.shape)

# Re-plot the points colored by predicted class.
# plt.scatter(inputs[:, 0], inputs[:, 1], c=zero_or_ones)
# plt.show()

# Accuracy: numpy's == compares element by element, so `targets` is
# flattened first to match the 1-D shape of `zero_or_ones`.
matches = zero_or_ones == targets.flatten()
# print(matches.shape)
print(f'accuracy={matches.mean():.4f}')

# Draw the learned decision boundary, i.e. the line where prediction = 0.5:
# y = -(w1/w2)*x + (prediction - b)/w2
x=np.linspace(-2,6,num=10)
y=-w[0]/w[1]*x+(0.5-b)/w[1]
plt.plot(x,y,c='r')
plt.show()

