import matplotlib.pyplot as plt
import numpy as np

from typing import Generator, Tuple

from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.datasets import make_blobs  # used to generate normally distributed sample points


def sigmoid_activation(x: np.ndarray) -> np.ndarray:
    """Element-wise sigmoid 1 / (1 + e^-x).

    Implemented via the identity sigmoid(x) = (1 + tanh(x / 2)) / 2,
    which is mathematically equivalent but avoids the overflow
    RuntimeWarning that np.exp(-x) raises for large negative x.

    :param x: array of raw scores
    :return: array of the same shape with values in (0, 1)
    """
    return 0.5 * (1.0 + np.tanh(0.5 * x))


def predict(x: np.ndarray, w: np.ndarray) -> np.ndarray:
    """
    Threshold the sigmoid activations into hard 0/1 class labels.

    :param x: feature matrix, one sample per row
    :param w: weight column vector
    :return: float matrix of 0/1 predictions, same shape as x.dot(w)
    """
    scores = sigmoid_activation(x.dot(w))
    # activations above 0.5 become class 1, everything else class 0
    return np.where(scores > 0.5, 1.0, 0.0)


def main(epochs: int, alpha: float = 0.01):
    """
    Train and evaluate a logistic-regression classifier on synthetic blobs.

    :param epochs: number of training epochs
    :param alpha: learning rate (annotation fixed: the default 0.01 is a
        float, not an int as previously annotated)
    """
    # two Gaussian clusters -> a (nearly) linearly separable binary problem
    x, y = make_blobs(n_samples=1000, n_features=2, centers=2,
                      cluster_std=1.5, random_state=1)
    y = y.reshape((y.shape[0], 1))
    # append a constant-1 column so the bias term is learned as part of W
    x = np.c_[x, np.ones((x.shape[0]))]
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.5,
                                                        random_state=42)
    print('[info]:训练中...')
    W = np.random.randn(x.shape[1], 1)
    losses = []

    # mini-batch SGD; normal_gradient_descent(alpha, W, losses, epochs,
    # train_x, train_y) is the full-batch alternative
    W = s_gradient_descent(alpha, W, epochs, losses, train_x, train_y)

    print("[INFO] evaluating...")
    preds = predict(test_x, W)
    print(classification_report(test_y, preds))
    plot_pic(test_x, test_y, epochs, losses)


def next_batch(batch_size: int, train_x: np.ndarray,
               train_y: np.ndarray) -> Generator[Tuple[np.ndarray, np.ndarray], None, None]:
    """
    Yield consecutive mini-batches of the training data for SGD.

    The rows are sliced in order into chunks of ``batch_size``; the
    final chunk may be smaller when the row count is not a multiple.

    :param batch_size: number of rows per batch
    :param train_x: training features
    :param train_y: training labels
    :return: generator of (features, labels) tuples
    """
    n_rows = train_x.shape[0]
    start = 0
    while start < n_rows:
        stop = start + batch_size
        yield train_x[start:stop], train_y[start:stop]
        start = stop


def s_gradient_descent(alpha: float, W: np.ndarray,
                       epochs: int, losses: list,
                       train_x: np.ndarray, train_y: np.ndarray) -> np.ndarray:
    """
    Mini-batch stochastic gradient descent for logistic regression.

    Fix: the per-epoch loss buffer is now reset at the start of every
    epoch. Previously it was created once before the epoch loop, so
    each reported "epoch loss" was a running average over all batches
    since training began rather than the current epoch's loss.

    :param alpha: learning rate
    :param W: weight matrix; updated in place and returned
    :param epochs: number of passes over the training set
    :param losses: list the per-epoch mean loss is appended to
    :param train_x: training features (bias column included)
    :param train_y: training labels, shape (n, 1)
    :return: the trained weight matrix
    """
    for epoch in np.arange(0, epochs):
        epoch_loss = []  # reset so the average covers this epoch only
        for batch_x, batch_y in next_batch(32, train_x, train_y):
            preds = sigmoid_activation(batch_x.dot(W))
            error = preds - batch_y
            epoch_loss.append(np.sum(error ** 2))
            # gradient of the squared-error objective w.r.t. W
            gradient = batch_x.T.dot(error)
            W += -alpha * gradient
        loss = np.average(epoch_loss)
        losses.append(loss)
        if epoch % 5 == 0:  # epoch 0 satisfies this too
            print(f"[info]: {epoch=} {loss=:.7f}")
    return W


def normal_gradient_descent(alpha: float, W: np.ndarray,
                     losses: list, epochs: int,
                     train_x: np.ndarray, train_y: np.ndarray) -> np.ndarray:
    """
    Full-batch (vanilla) gradient descent for logistic regression.

    :param alpha: learning rate
    :param W: weight matrix; updated in place and returned
    :param losses: list each epoch's squared-error loss is appended to
    :param epochs: number of gradient steps
    :param train_x: training features (bias column included)
    :param train_y: training labels, shape (n, 1)
    :return: the trained weight matrix
    """
    for epoch in np.arange(0, epochs):
        # forward pass over the entire training set at once
        error = sigmoid_activation(train_x.dot(W)) - train_y
        loss = np.sum(error ** 2)
        losses.append(loss)
        # TODO: note -- gradient comes from matrix differentiation
        W -= alpha * train_x.T.dot(error)
        if epoch == 0 or (epoch + 1) % 5 == 0:
            print(f"[info]:{epoch=}, {loss=:.7f}")
    return W


def plot_pic(test_x: np.ndarray, test_y: np.ndarray, epochs: int, losses: list):
    """
    Show two figures: the test-set scatter and the training-loss curve.

    :param test_x: test features (only the first two columns are drawn)
    :param test_y: test labels, used to colour the points
    :param epochs: number of epochs (x-axis of the loss curve)
    :param losses: one recorded loss value per epoch
    :return: None
    """
    plt.style.use("ggplot")

    # figure 1: the test points coloured by their true label
    plt.figure()
    plt.scatter(test_x[:, 0], test_x[:, 1], marker="o", c=test_y, s=30)
    plt.title("Data")

    # figure 2: loss per training epoch
    plt.figure()
    plt.title("Training Loss")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss")
    plt.plot(np.arange(0, epochs), losses)

    plt.show()


if __name__ == '__main__':
    # train for 100 epochs with the default learning rate
    main(epochs=100)
