import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Load the dataset: two test scores per sample plus a 0/1 'Accepted' label.
data = pd.read_csv('ex2data2.txt', names=['Test 1', 'Test 2', 'Accepted'])

# Scatter the two classes with distinct colors/markers.
rejected = data[data['Accepted'] == 0]
accepted = data[data['Accepted'] == 1]

fig, ax = plt.subplots()
ax.scatter(rejected['Test 1'], rejected['Test 2'], c='r', marker='x', label='y=0')
ax.scatter(accepted['Test 1'], accepted['Test 2'], c='b', marker='o', label='y=1')
ax.legend()
ax.set(xlabel='Test 1', ylabel='Test 2')
plt.show()


# ===================================== Feature mapping
def feature_mapping(x1, x2, power):
    """Expand two features into all polynomial terms x1^a * x2^b with a+b <= power.

    Returns a DataFrame whose columns are named 'F{a}{b}' in increasing total
    degree; 'F00' is the all-ones bias column.
    """
    terms = {
        'F{}{}'.format(deg - j, j): np.power(x1, deg - j) * np.power(x2, j)
        for deg in np.arange(power + 1)
        for j in np.arange(deg + 1)
    }
    return pd.DataFrame(terms)


# Pull the raw columns and expand them into degree-6 polynomial features.
x1 = data['Test 1']
x2 = data['Test 2']

data2 = feature_mapping(x1, x2, 6)

print(data2.head())
# Sample of the expanded features:
#    F00       F10      F01       F20  ...       F33       F24       F15       F06
# 0  1.0  0.051267  0.69956  0.002628  ...  0.000046  0.000629  0.008589  0.117206
# 1  1.0 -0.092742  0.68494  0.008601  ... -0.000256  0.001893 -0.013981  0.103256

# Design matrix (bias column included via F00) and column-vector labels.
X = data2.values
y = data.iloc[:, -1].values.reshape(-1, 1)
print(X.shape)
print(y.shape)


# ================ Cost function
# A high-order hypothesis needs regularization.
# Smaller λ -> prone to overfitting; larger λ -> prone to underfitting.
def sigmoid(z):
    """Logistic function: maps any real z (scalar or ndarray) into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)


def costFunction(X, y, theta, lamda):
    """Regularized logistic-regression cost.

    cost = -(1/m) * sum(y*log(h) + (1-y)*log(1-h)) + (lamda/(2m)) * sum(theta[1:]^2)

    The bias parameter theta[0] is excluded from the penalty.
    """
    m = len(X)
    h = sigmoid(X @ theta)
    log_likelihood = y * np.log(h) + (1 - y) * np.log(1 - h)

    # L2 penalty over all parameters except the intercept.
    penalty = np.sum(np.power(theta[1:], 2)) * (lamda / (2 * m))

    return -np.sum(log_likelihood) / m + penalty


# 测试
theta = np.zeros((28, 1))
lamda = 1
cost_init = costFunction(X, y, theta, lamda)
print(cost_init)


# 0.6931471805599454


# Gradient-descent function
def gradientDescent(X, y, theta, alpha, iters, lamda):
    """Batch gradient descent on the L2-regularized logistic-regression cost.

    Parameters
    ----------
    X : (m, n) design matrix; first column is assumed to be the bias feature
    y : (m, 1) 0/1 label vector
    theta : (n, 1) initial parameter vector
    alpha : learning rate
    iters : number of iterations
    lamda : regularization strength (the bias theta[0] is never penalized)

    Returns
    -------
    (theta, costs) : final parameters and the per-iteration cost history.
    """
    costs = []

    for i in range(iters):
        # Regularization part of the gradient: (lamda/m) * theta_j for j >= 1,
        # with a zero prepended so the intercept is not penalized.
        reg = theta[1:] * (lamda / len(X))
        reg = np.insert(reg, 0, values=0, axis=0)

        # BUG FIX: the penalty gradient must be scaled by the learning rate
        # alpha like the data gradient. The old update subtracted `reg * lamda`
        # (i.e. lamda^2/m * theta, missing alpha entirely), which effectively
        # applied a wrong regularization strength.
        grad = (X.T @ (sigmoid(X @ theta) - y)) / len(X)
        theta = theta - alpha * (grad + reg)

        cost = costFunction(X, y, theta, lamda)
        costs.append(cost)

        # Periodic progress report.
        if i % 1000 == 0:
            print(cost)

    return theta, costs


# Training hyperparameters.
# Smaller lamda risks overfitting; larger lamda risks underfitting.
alpha = 0.001
iters = 200000
lamda = 0.001

theta_final, costs = gradientDescent(X, y, theta, alpha, iters, lamda)
print(theta_final)


# Accuracy
def predict(X, theta):
    """Return hard 0/1 class labels: 1 where the predicted probability >= 0.5."""
    return np.where(sigmoid(X @ theta) >= 0.5, 1, 0).ravel().tolist()


# Training accuracy: fraction of examples whose hard prediction matches the label.
y_pre = np.array(predict(X, theta_final)).reshape(-1, 1)
acc = np.mean(y_pre == y)
print(acc)
# expected: 0.8135593220338984


# Decision boundary: the classifier predicts 1 where phi(x) @ theta >= 0, so
# the boundary is the zero level set of z over a dense grid of the plane.
grid = np.linspace(-1.2, 1.2, 200)
xx, yy = np.meshgrid(grid, grid)
zz = feature_mapping(xx.ravel(), yy.ravel(), 6).values @ theta_final
zz = zz.reshape(xx.shape)

fig, ax = plt.subplots()
neg = data[data['Accepted'] == 0]
pos = data[data['Accepted'] == 1]
ax.scatter(neg['Test 1'], neg['Test 2'], c='r', marker='x', label='y=0')
ax.scatter(pos['Test 1'], pos['Test 2'], c='b', marker='o', label='y=1')
ax.legend()
ax.set(xlabel='Test 1', ylabel='Test 2')

# Draw only the z = 0 contour, i.e. the boundary itself.
plt.contour(xx, yy, zz, 0)
plt.show()



