# -*-coding:utf-8 -*-
# @Time: 2023/3/25 21:04
# @Author: cuishuohao
# @File: demo1
# @Software: PyCharm
# 1构建二分类问题数据集

import numpy as np
import matplotlib.pyplot as plt

# Build a 2x2 grid of subplots; figsize: width 9, height 6
fig, ax = plt.subplots(2, 2, figsize=(9, 6))
# Increase hspace to widen the vertical gap between the subplot rows
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.4)
plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK characters in labels correctly

np.random.seed(0)  # fixed seed so the generated dataset is reproducible
Num = 100  # samples per class
# Build the negative-class samples: features drawn from N(3,1) and N(6,1)
x_1 = np.random.normal(3, 1, size=Num)
x_2 = np.random.normal(6, 1, size=Num)
y = np.zeros(Num)  # y is the probability a sample is positive; all 0 here
c_0 = np.array([x_1, x_2, y])  # class 0, shape (3, Num)
# Build the positive-class samples: features drawn from N(6,1) and N(3,1)
x_1 = np.random.normal(6, 1, size=Num)
x_2 = np.random.normal(3, 1, size=Num)
y = np.ones(Num)  # all 1 here: every sample belongs to the positive class
c_1 = np.array([x_1, x_2, y])  # class 1, shape (3, Num)
# print("c_1.shape: ",c_1.shape) # 3 rows x 100 cols
# Transpose so each row is one sample: shape (Num, 3) = [x_1, x_2, y]
c_1 = c_1.T
c_0 = c_0.T
# Visualize the raw dataset in subplot 1
ax[0][0].scatter(c_0[:, 0], c_0[:, 1], marker='x', label="负例样本")
ax[0][0].scatter(c_1[:, 0], c_1[:, 1], marker='+', label="正例样本")
ax[0][0].legend()
ax[0][0].set_xlabel("x_1")
ax[0][0].set_ylabel("x_2")
ax[0][0].set_title("subplot 1:dataset")

# Assemble the full dataset, then split into train (160) and test (40)
all_data = np.concatenate((c_1, c_0))
# print("all_data.shape: ",all_data.shape) # 200 rows x 3 cols
np.random.shuffle(all_data)  # in-place shuffle so the split mixes both classes
train_data_X = all_data[:160, :2]  # first two columns are the features
train_data_y = all_data[:160, -1]  # last column is the label
test_data_X = all_data[160:, :2]
test_data_y = all_data[160:, -1]

# -----------------------------------------------------------------

# The linear model is z = w.x + b; both w and b are optimized below.
# Initialize the weight vector w = (w_1, w_2) and the bias b at random
# (deliberately NOT zero-initialized).
w = np.random.rand(2, 1)
b = np.random.rand()
print("w, w.shape, b: ", w, w.shape, b)

# Plot the (random) initial decision boundary over the data.
# The boundary is the set where w_1*x_1 + w_2*x_2 + b = 0, i.e.
#   x_2 = -(w_1*x_1 + b) / w_2
# BUG FIX: the original computed -(w[0]*x)/w[1] + b, adding b outside
# instead of folding it into the numerator, which drew the wrong line.
x = np.arange(1, 9)
y = -(w[0] * x + b) / w[1]
ax[0][1].plot(x, y, 'r')
ax[0][1].scatter(c_0[:, 0], c_0[:, 1], marker='x', label="负例样本")
ax[0][1].scatter(c_1[:, 0], c_1[:, 1], marker='+', label="正例样本")
ax[0][1].legend()
ax[0][1].set_xlabel("x_1")
ax[0][1].set_ylabel("x_2")
ax[0][1].set_title("subplot 2:init fitting line")


# -----------------------------------------------------------------

# The random initial model does not separate the classes. Define the
# logistic (sigmoid) function that squashes a raw score into (0, 1).
def logistic(z):
    """Element-wise sigmoid: 1 / (1 + e^(-z)); works on scalars and arrays."""
    exp_neg_z = np.exp(-z)  # float arithmetic via the 1.0 literals below
    return 1.0 / (1.0 + exp_neg_z)


# Cross-entropy loss: measures the gap between the true labels and the
# predicted probabilities (cf. Eq. 3.37 in the accompanying text).
def loss_fun(y, y_hat):
    """Mean binary cross-entropy of predictions y_hat against labels y."""
    per_sample = y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)
    return -np.mean(per_sample)


# Hyperparameters: learning rate and maximum number of epochs.
lr = 0.01
epochs = 5000

# Train for `epochs` iterations, or stop early once the loss is tiny.
w_list = []
loss_list = []

for i in range(epochs):
    # Forward pass: scores z = w^T X^T + b, shape (1, 160)
    res1 = np.dot(w.T, train_data_X.T) + b
    y_hat = logistic(res1).reshape(-1)  # predicted probabilities, shape (160,)
    loss = loss_fun(train_data_y, y_hat)
    # Backward pass. For cross-entropy + sigmoid, dLoss/dz = y_hat - y,
    # so with res2 = (y - y_hat), shape (160, 1):
    res2 = (train_data_y - y_hat).reshape(-1, 1)
    # dLoss/dw = mean over samples of X * (y_hat - y) -> shape (2,).
    # (res2 (160, 1) broadcasts element-wise across X's 2 feature columns.)
    grad_w = -np.mean(train_data_X * res2, axis=0)
    w = w - (lr * grad_w).reshape(2, 1)

    # dLoss/db = mean(y_hat - y) = -mean(res2).
    # BUG FIX: the original used +np.mean(res2), which made the update
    # b -= lr*grad_b ASCEND the loss surface along b (note grad_w above
    # correctly carries the minus sign).
    grad_b = -np.mean(res2)
    b = b - (lr * grad_b)

    w_list.append(w)
    loss_list.append(loss)
    if i % 100 == 0:
        print("i:", i, "loss:", loss)  # fixed ';' typo in the progress message
    if loss < 0.0001:
        break

# Plot how the loss evolved over training to check convergence.
ax[1][0].plot(loss_list, label="Loss Change")
ax[1][0].legend()
ax[1][0].set_title("subplot 3:loss decline curve")

# The model has converged; draw the learned decision boundary.
# Boundary: w_1*x_1 + w_2*x_2 + b = 0  =>  x_2 = -(w_1*x_1 + b) / w_2.
# BUG FIX: the original computed -(w[0]*x)/w[1] + b, adding b outside
# instead of folding it into the numerator, which drew the wrong line.
x = np.arange(1, 9)
y = -(w[0] * x + b) / w[1]
ax[1][1].plot(x, y, 'r')
ax[1][1].scatter(c_0[:, 0], c_0[:, 1], marker='x', label="负例样本")
ax[1][1].scatter(c_1[:, 0], c_1[:, 1], marker='+', label="正例样本")
ax[1][1].legend()
ax[1][1].set_xlabel("x_1")
ax[1][1].set_ylabel("x_2")
ax[1][1].set_title("subplot 4: fitting well")

plt.show()

print(w[0], w[1], b)
