import math
import random

import matplotlib.pyplot as plt
import numpy as np
import copy
from sklearn.datasets import load_iris, load_breast_cancer


def generate_data(sample_num: int, u: list = None, sigma: list = None, naive: bool = True):
    """
    Draw two 2-D Gaussian clusters and split them into train/test sets.

    Class 0 is centered at u[0], class 1 at u[1]; both classes share the
    covariance matrix sigma. Half of the training samples belong to each class.

    :param sample_num: total number of training samples; should be divisible
                       by 8 so the per-class train/test splits come out even
    :param u: 2x2 nested list of class means (u[0] for class 0, u[1] for class 1)
    :param sigma: 2x2 covariance matrix shared by both classes; NOTE the
                  default [[0.2, 0.2], [0.2, 0.2]] is singular and only becomes a
                  valid covariance once naive=True zeroes the off-diagonal
    :param naive: if True, zero the off-diagonal entries of sigma so the two
                  features are independent (the naive-Bayes assumption)
    :return: x_train (sample_num, 2), y_train (sample_num, 1),
             x_test (sample_num/4, 2), y_test (sample_num/4, 1)
    """
    if sigma is None:
        sigma = [[0.2, 0.2], [0.2, 0.2]]
    if u is None:
        u = [[5, 5], [4, 4]]
    sigma_new = copy.deepcopy(sigma)  # work on a copy so the caller's sigma is untouched
    n = sample_num // 2      # training samples per class
    n_test = sample_num // 8  # test samples per class
    x_train = np.zeros((sample_num, 2), dtype=np.float32)
    y_train = np.zeros((sample_num, 1), dtype=np.int32)
    x_test = np.zeros((n_test * 2, 2), dtype=np.float32)
    y_test = np.zeros((n_test * 2, 1), dtype=np.int32)
    if naive:  # drop feature correlations: keep only the diagonal of sigma
        sigma_new[0][1] = 0
        sigma_new[1][0] = 0
    x_train[:n, :] = np.random.multivariate_normal(u[0], sigma_new, size=n)
    x_test[:n_test, :] = np.random.multivariate_normal(u[0], sigma_new, size=n_test)
    x_train[n:, :] = np.random.multivariate_normal(u[1], sigma_new, size=n)
    x_test[n_test:, :] = np.random.multivariate_normal(u[1], sigma_new, size=n_test)
    # First half is class 0 (arrays are already zero-filled), second half class 1.
    y_train[n:] = 1
    y_test[n_test:] = 1
    return x_train, y_train, x_test, y_test


def draw_picture_2d(x_train: np.ndarray, y_train: np.ndarray, discriminant_function=None, title: str = 'picture'):
    """
    Scatter-plot a 2-D data set and, optionally, overlay the decision line.

    :param title: figure title
    :param x_train: (n, 2) array of points
    :param y_train: (n, 1) labels, used as the point colors
    :param discriminant_function: callable mapping x to y for the decision line
    :return: None (shows the figure)
    """
    x_lo, x_hi = min(x_train[:, 0]) - 1, max(x_train[:, 0]) + 1
    y_lo, y_hi = min(x_train[:, 1]) - 1, max(x_train[:, 1]) + 1
    plt.axis([x_lo, x_hi, y_lo, y_hi])  # pin the axis limits before drawing
    if discriminant_function:
        xs = np.arange(x_lo, x_hi, 0.01)
        plt.plot(xs, discriminant_function(xs))
    plt.scatter(x=x_train[:, 0], y=x_train[:, 1], c=list(y_train), marker='o')
    plt.title(title)
    plt.ylabel('y')
    plt.xlabel('x')
    plt.legend(['discriminant_function'], loc='best')
    plt.show()


def get_discriminant_function(w):
    """
    Turn a logistic-regression weight vector into the 2-D decision line.

    The boundary w0 + w1*x + w2*y = 0 is solved for y, giving a degree-1
    polynomial y = (-w1/w2)*x + (-w0/w2).

    :param w: (3, 1) weight matrix, bias first
    :return: np.poly1d linear function of x
    """
    coef = w.T[0]
    slope = -coef[1] / coef[2]
    intercept = -coef[0] / coef[2]
    line = np.poly1d([slope, intercept])
    print('判别函数为：', line)
    return line


def sigmoid(x):
    """
    Numerically stable logistic function 1 / (1 + e^-x).

    Flooring x at -500 prevents np.exp(-x) from overflowing to inf (with a
    RuntimeWarning) for very negative inputs; sigmoid(-500) is already ~7e-218,
    so results are unchanged within float64 precision.

    :param x: scalar or ndarray
    :return: value(s) in (0, 1), same shape as x
    """
    z = np.clip(x, -500, None)  # floor only; large positive x is safe (e^-x -> 0)
    return 1 / (1 + np.exp(-z))


def gradient_descent(x_train: np.ndarray, y_train: np.ndarray, learning_rate: float, l: float = 0.0,
                     epsilon: float = 1e-7, epochs: int = 100000):
    """
    Batch gradient descent for L2-regularized logistic regression.

    :param x_train: (sample_num, feature) design matrix
    :param y_train: (sample_num, 1) array of 0/1 labels
    :param learning_rate: step size
    :param l: L2-penalty coefficient; NOTE(review): the penalty term l*w also
              shrinks the bias w[0], which is usually left unregularized — confirm
              this is intended
    :param epsilon: stop once the gradient norm falls below epsilon
    :param epochs: maximum number of update steps (the original loop ran at
                   most epochs-1 iterations; fixed here)
    :return: (feature+1, 1) weight matrix, bias first
    """
    sample_num, feature = x_train.shape
    w = np.ones([feature + 1, 1])  # initial weights: all ones
    x_aug = np.c_[np.ones([sample_num, 1]), x_train]  # augment with a bias column of ones
    for _ in range(epochs):
        # Negative log-likelihood gradient, averaged over the batch.
        gradient = - x_aug.T.dot(y_train - sigmoid(x_aug.dot(w))) / sample_num
        if np.linalg.norm(gradient) < epsilon:  # converged
            break
        w = w - learning_rate * (l * w + gradient)  # penalized update step
    return w


def predict(w, x_test):
    """
    Predict 0/1 class labels from a trained weight matrix.

    :param w: (feature+1, 1) weight matrix, bias first
    :param x_test: (test_sample_num, feature) data matrix
    :return: (test_sample_num, 1) int32 array of predicted labels
    """
    test_sample_num = x_test.shape[0]
    x_test = np.c_[np.ones([test_sample_num, 1]), x_test]  # augment with a bias column of ones
    y_pred = []
    for score in x_test.dot(w):
        if score > 0:
            y_pred.append([1])
        elif score < 0:
            y_pred.append([0])
        else:
            # Exactly on the boundary: pick a class at random. Append a plain
            # scalar — the original appended a length-1 ndarray, which breaks
            # np.array's shape inference when mixed with the [1]/[0] rows.
            y_pred.append([int(np.random.randint(0, 2))])
    return np.array(y_pred, dtype=np.int32)


def accuracy(y_true, y_pred):
    """
    Fraction of samples whose prediction matches the ground truth.

    :param y_true: (sample_num, 1) array of true labels
    :param y_pred: (sample_num, 1) array of predicted labels
    :return: accuracy in [0, 1] as a float
    """
    total = y_true.shape[0]
    correct = sum(1 for truth, pred in zip(y_true, y_pred) if truth[0] == pred[0])
    return correct / total


if __name__ == '__main__':
    # Experiment 1: synthetic 2-D Gaussian data from generate_data above.
    u = [[2, 2], [4, 4]]
    sigma = [[3, 1.5], [1.5, 3]]
    naive = True
    # naive = False

    sample_num = 200
    learning_rate = 1e-2
    x_train, y_train, x_test, y_test = generate_data(sample_num, u, sigma, naive)

    # Run 1: train WITH L2 regularization, then plot train/test accuracy.
    l = 0.001
    # NOTE(review): '\033[0;34;40-m' looks like a malformed ANSI color code
    # ('...40m' would be standard) — confirm the intended terminal output.
    print('\033[0;34;40-m---有正则项，l={}---\033[0m'.format(l))
    w = gradient_descent(x_train, y_train, learning_rate, l)
    discriminant_function = get_discriminant_function(w)
    y_pred = predict(w, x_train)
    acc = accuracy(y_train, y_pred)
    draw_picture_2d(x_train, y_train, discriminant_function,
                    title='Training with l={} naive={} train_acc={}'.format(l, naive, acc))
    y_pred = predict(w, x_test)
    acc = accuracy(y_test, y_pred)
    draw_picture_2d(x_test, y_test, discriminant_function,
                    title='Testing with l={} naive={} test_acc={}'.format(l, naive, acc))

    # Run 2: train WITHOUT regularization on the same data, for comparison.
    l = 0
    print('\033[0;34;40-m---无正则项---\033[0m')
    w = gradient_descent(x_train, y_train, learning_rate, l)
    discriminant_function = get_discriminant_function(w)
    y_pred = predict(w, x_train)
    acc = accuracy(y_train, y_pred)
    draw_picture_2d(x_train, y_train, discriminant_function,
                    title='Training with l={} naive={} train_acc={}'.format(l, naive, acc))
    y_pred = predict(w, x_test)
    acc = accuracy(y_test, y_pred)
    draw_picture_2d(x_test, y_test, discriminant_function,
                    title='Testing with l={} naive={} test_acc={}'.format(l, naive, acc))

    # # Experiment 2 (disabled): Iris dataset — rows 0..99 cover two classes;
    # # 25 random rows held out for testing, the other 75 used for training.
    # learning_rate = 1e-2
    # l = 0.001
    # iris = load_iris()
    # sample = random.sample(range(0, 100), 25)
    # sample_left = list(set(range(0, 100)).difference(set(sample)))
    # iris_data = np.mat(iris.data)
    # iris_target = np.mat(iris.target).T
    # x_train = iris_data[sample_left]
    # y_train = iris_target[sample_left]
    # x_test = iris_data[sample]
    # y_test = iris_target[sample]
    # w = gradient_descent(x_train, y_train, learning_rate, l)
    # y_pred = predict(w, x_test)
    # acc = accuracy(y_test, y_pred)
    # print('accuracy = {}'.format(acc))

    # Experiment 3 (disabled): breast_cancer dataset — rows 200..399 split
    # into 150 training and 50 random test rows.
    # learning_rate = 1e-4
    # l = 0.001
    # breast_cancer = load_breast_cancer()
    # sample = random.sample(range(200, 400), 50)
    # sample_left = list(set(range(200, 400)).difference(set(sample)))
    # breast_data = np.mat(breast_cancer.data)
    # breast_target = np.mat(breast_cancer.target).T
    # x_train = breast_data[sample_left]
    # y_train = breast_target[sample_left]
    # x_test = breast_data[sample]
    # y_test = breast_target[sample]
    # w = gradient_descent(x_train, y_train, learning_rate, l)
    # y_pred = predict(w, x_test)
    # acc = accuracy(y_test, y_pred)
    # print('accuracy = {}'.format(acc))
