import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.metrics import accuracy_score

# Use the SimHei font so Chinese text in titles/labels renders correctly
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly alongside CJK fonts


# 数据加载函数
def loaddata():
    """Load the comma-separated exam dataset.

    Returns:
        X: feature matrix (all columns except the last).
        y: labels as a column vector (last column).
    """
    raw = np.loadtxt('../data/data3.txt', delimiter=',')
    feature_count = raw.shape[1] - 1  # last column is the label
    return raw[:, :feature_count], raw[:, -1].reshape(-1, 1)


# 数据标准化函数
def normalize(x):
    """Standardize each column to zero mean and unit sample variance (ddof=1)."""
    mean = np.average(x, axis=0)
    sample_std = np.std(x, axis=0, ddof=1)
    return (x - mean) / sample_std


# 绘制二维散点图函数
def plot(X, y):
    """Scatter the two exam scores: class 1 drawn as 'x', class 0 as 'o'."""
    positives = np.where(y == 1)[0]
    negatives = np.where(y == 0)[0]
    plt.scatter(X[positives, 0], X[positives, 1], marker='x')
    plt.scatter(X[negatives, 0], X[negatives, 1], marker='o')
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')


# 0 1  映射函数
def sigmoid(z):
    """Logistic function: squashes any real-valued input into (0, 1)."""
    return 1.0 / (1.0 + np.exp(-z))


# 假设函数
def hypothesis(X, theta):
    """Logistic-regression hypothesis: sigmoid of the linear score X @ theta."""
    return sigmoid(X @ theta)


# 损失函数
def computeCost(X, y, theta):
    # 补充计算代价的代码；
    m = X.shape[0]  # 行数
    y_hat = hypothesis(X, theta)
    z = -1 * (y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat))  # 交叉熵损失
    return np.sum(z) / m


def gradientDescent(X, y, theta, iterations, alpha):
    """Fit logistic-regression parameters with batch gradient descent.

    A bias column of ones is prepended to X internally, so theta must have
    X.shape[1] + 1 rows. Returns the final theta and the loss after every
    iteration as an (iterations, 1) array.

    NOTE(review): the gradient is not divided by m, so the effective step
    size scales with the sample count — alpha appears tuned for that.
    """
    m = X.shape[0]
    # Prepend the bias (intercept) column of ones.
    X = np.hstack((np.ones((m, 1)), X))
    loss_history = np.zeros((iterations, 1))
    for step in range(iterations):
        # Residual y - h(X); moving along X.T @ residual decreases the cost.
        residual = y - hypothesis(X, theta)
        theta = theta + alpha * np.dot(X.T, residual)
        loss_history[step] = computeCost(X, y, theta)
    return theta, loss_history


def plotloss(iterations, loss_history):
    """Plot the loss recorded at each gradient-descent iteration."""
    steps = np.arange(iterations)
    plt.plot(steps, loss_history, 'g--', label='Loss Curve')
    plt.xlabel('Iterations')
    plt.ylabel('Loss')
    plt.title('损失函数图形')  # "Loss function curve" — kept in Chinese for the SimHei font
    plt.legend()  # show the legend
    plt.show()


def predict(X, theta):
    """Return hard 0/1 class predictions for X.

    A bias column of ones is prepended internally, matching gradientDescent.
    Probabilities >= 0.5 map to class 1, everything below to class 0.
    """
    bias = np.ones((X.shape[0], 1))
    X = np.hstack((bias, X))
    probabilities = hypothesis(X, theta)
    return np.where(probabilities >= 0.5, 1.0, 0.0)


def plotDescisionBoundary(X, y, theta):
    """Scatter the data colored by class and draw the linear decision boundary.

    The boundary is the line where theta0 + theta1*x1 + theta2*x2 = 0,
    solved for x2 as a function of x1.
    """
    colors = mpl.colors.ListedColormap(['g', 'r'])
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.scatter(X[:, 0], X[:, 1], c=np.array(y).squeeze(), cmap=colors, s=30)
    xs = np.arange(min(X[:, 0]), max(X[:, 0]), 0.01)
    ys = -(theta[0] + theta[1] * xs) / theta[2]
    plt.plot(xs, ys)
    plt.show()


# ---- Driver: load data, train logistic regression, visualize results ----

X, y = loaddata()
X = normalize(X)

# Scatter plot of the standardized data
plot(X, y)
plt.show()

m = X.shape[0]  # number of samples
n = X.shape[1]  # number of features
# theta is a column vector; n + 1 rows because gradientDescent prepends a bias column to X
theta = np.zeros(n + 1).reshape(n + 1, 1)

iterations = 1000
alpha = 0.008  # learning rate
real_theta, loss_history = gradientDescent(X, y, theta, iterations, alpha)

print("最终的参数", real_theta)
print("最终的损失值", loss_history[-1])

# Loss curve
plotloss(iterations, loss_history)
# Decision boundary over the scatter plot
plotDescisionBoundary(X, y, real_theta)

# Accuracy computed two ways: manual mean of matches, then sklearn's accuracy_score
h_test = predict(X, real_theta)
print("准确度:", np.mean(h_test == y))

print("准确度:", accuracy_score(y, h_test))
