"""
逻辑非线性边界
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from logistic_regression import LogisticRegression

# Load the microchip test dataset and take a first look at the two classes.
data = pd.read_csv('microchips-tests.csv')
validity = [0, 1]
x_axis = 'param_1'
y_axis = 'param_2'

# Scatter each validity class separately so it gets its own color and legend entry.
for val in validity:
    class_mask = data['validity'] == val
    plt.scatter(data[x_axis][class_mask], data[y_axis][class_mask], label=val)
plt.legend()
plt.show()

# Shuffle the rows so example order carries no information, then split into
# feature matrix (the two chip parameters) and label column (validity flag).
data_shuffle = data.sample(frac=1).reset_index(drop=True)
num_examples = len(data_shuffle)
x_train = data_shuffle[[x_axis, y_axis]].to_numpy().reshape(-1, 2)
y_train = data_shuffle['validity'].to_numpy().reshape(-1, 1)

# Model training: fit logistic regression on degree-5 polynomial features so
# the model can learn a non-linear decision boundary.
max_iterations = 100000
polynomial_degree = 5
normalize_data = False
logistic_regression = LogisticRegression(
    x_train,
    y_train,
    polynomial_degree=polynomial_degree,
    normalize_data=normalize_data,
)
thetas, loss_histories = logistic_regression.train(max_iterations)
labels = logistic_regression.unique_labels

# Gradient-descent loss curves, one per binary class.
for class_index in (0, 1):
    history = loss_histories[class_index]
    plt.plot(range(len(history)), history, label=labels[class_index])
plt.legend()
plt.show()

# Training-set accuracy: fraction of predictions that match the labels.
y_predict = logistic_regression.predict(x_train)
accuracy = np.mean(y_predict == y_train) * 100
print(f'Accuracy: {accuracy:.4f}%')

# Plot the decision boundary over the scattered training points.
x1_min, x1_max = np.min(x_train[:, 0]), np.max(x_train[:, 0])
x2_min, x2_max = np.min(x_train[:, 1]), np.max(x_train[:, 1])
n = 150
X1 = np.linspace(x1_min, x1_max, n)
X2 = np.linspace(x2_min, x2_max, n)
# plt.contour(X1, X2, Z) expects Z[row, col] == Z[x2_index, x1_index]:
# rows map to the y-axis (X2), columns to the x-axis (X1).
Y = np.zeros((n, n))

for x1_index, x1 in enumerate(X1):
    for x2_index, x2 in enumerate(X2):
        prediction = logistic_regression.predict(np.array([[x1, x2]]))
        # Mark class-0 cells with 1 so the 0/1 transition traces the boundary.
        if prediction == 0:
            # Fixed: was Y[x1_index, x2_index], which transposed the grid
            # relative to the axes and drew the boundary mirrored.
            Y[x2_index, x1_index] = 1

for val in validity:
    plt.scatter(
        x_train[(y_train == val).flatten(), 0],
        x_train[(y_train == val).flatten(), 1],
        label=val
    )

plt.contour(X1, X2, Y)
plt.legend()
plt.show()
