import numpy as np
import matplotlib.pyplot as plt

# 1. Load the dataset (5 pts)
# NOTE(review): relative path with non-ASCII directory names — confirm it
# resolves from the intended working directory.
data = np.loadtxt('../../../large_dat/机器学习1-周考3-技能/egg.txt', delimiter=',')

# 2. Preprocess the data into matrices (5 pts)
# x: every column but the last (features); y: last column, kept 2-D (labels).
x = data[:, :-1]
m = len(x)  # number of samples
y = data[:, -1:]

# 3. Shuffle the data (5 pts)
# Fixed seed so the permutation (and hence the split below) is reproducible.
np.random.seed(1)
a = np.random.permutation(m)
x = x[a]
y = y[a]
X = np.c_[np.ones(m), x]  # prepend a bias column of ones

# 4. Split into training (70%) and test (30%) sets (5 pts)
m_train = int(0.7 * m)
x_train, x_test = np.split(x, [m_train])
y_train, y_test = np.split(y, [m_train])
X_train, X_test = np.split(X, [m_train])


# 5. Sigmoid activation function (5 pts)
def sigmoid(z):
    """Logistic activation: map z elementwise into the open interval (0, 1)."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)


def model(X, theta):
    """Linear layer: matrix product of inputs X and weights theta."""
    return X @ theta


# 6. Forward propagation (5 pts)
def FP(X, theta1, theta2):
    """Propagate inputs through the two-layer network.

    Returns the hidden-layer activations and the output-layer activations.
    """
    hidden_in = model(X, theta1)
    hidden_out = sigmoid(hidden_in)
    output_in = model(hidden_out, theta2)
    output_out = sigmoid(output_in)
    return hidden_out, output_out


# 7. Cross-entropy cost function (5 pts)
def cost_func(h, y):
    """Mean binary cross-entropy between predictions h and labels y.

    h is clipped away from exactly 0 and 1 so np.log never yields
    -inf/NaN when the network output saturates (the original returned
    NaN in that case).
    """
    m = len(h)
    eps = 1e-12  # small enough not to disturb non-saturated values
    h = np.clip(h, eps, 1 - eps)
    return -1 / m * np.sum(y * np.log(h) + (1 - y) * np.log(1 - h))


# 8. Backpropagation (5 pts)
def BP(X, y, theta1, theta2, a2, a3, alpha):
    """One backprop step over the whole batch.

    Computes the output- and hidden-layer deltas, averages the gradients
    over the batch, and applies an in-place gradient-descent update to
    both weight matrices.  Returns the updated (theta1, theta2).
    """
    n_samples = len(y)
    delta3 = a3 - y
    delta2 = delta3 @ theta2.T * (a2 * (1 - a2))
    grad2 = 1 / n_samples * (a2.T @ delta3)
    grad1 = 1 / n_samples * (X.T @ delta2)
    theta2 -= alpha * grad2
    theta1 -= alpha * grad1
    return theta1, theta2


# 9. Gradient descent with cost history (5 pts)
def grad(X, y, alpha=0.1, iter0=1000):
    """Train the two-layer network with batch gradient descent.

    Returns (theta1, theta2, cost history array, final training outputs).

    Fixes vs. the original:
    - np.zeros(n, 4) / np.zeros(4, 1) raised TypeError — the shape must
      be a tuple.
    - All-zero weights keep every hidden unit identical forever
      (symmetry); small random initial weights break the tie.
    - group was 0 for iter0 < 20, causing ZeroDivisionError in i % group.
    """
    m, n = X.shape
    # Print roughly 20 progress lines, at least one per iteration.
    group = max(1, iter0 // 20)
    theta1 = np.random.randn(n, 4) * 0.01
    theta2 = np.random.randn(4, 1) * 0.01
    j_his = np.zeros(iter0)
    j = a3 = None  # defined even if iter0 == 0
    for i in range(iter0):
        a2, a3 = FP(X, theta1, theta2)
        j = cost_func(a3, y)
        j_his[i] = j
        if 0 == i % group:
            print(f'#{i + 1} cost func value = {j}')
        theta1, theta2 = BP(X, y, theta1, theta2, a2, a3, alpha)
    # Make sure the final cost is reported even if the loop count
    # did not land on a reporting step.
    if iter0 > 0 and 0 != (iter0 - 1) % group:
        print(f'#{iter0} cost func value = {j}')
    return theta1, theta2, j_his, a3


# 10. Accuracy metric (5 pts)
def score(h, y):
    """Fraction of thresholded predictions (h > 0.5) that match labels y.

    The original returned the raw elementwise boolean comparison array;
    an accuracy should be a single scalar in [0, 1], so take the mean.
    """
    return np.mean(y == (h > 0.5))

# 11. TODO: main routine — call gradient descent to train the model (5 pts)
# 12. TODO: run predictions on the test set (5 pts)
# 13. TODO: plot the cost-function curve (5 pts)
# 14. TODO: compute and print the accuracy on the test set (5 pts)
