"""
逻辑线性边界
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from logistic_regression import LogisticRegression

# Load the Iris dataset; the two petal measurements are used as features.
data = pd.read_csv('iris.csv')
iris_types = ['SETOSA', 'VERSICOLOR', 'VIRGINICA']
x_axis = 'petal_length'
y_axis = 'petal_width'

# Scatter the raw samples, one color per class.
for iris_type in iris_types:
    subset = data[data['class'] == iris_type]
    plt.scatter(subset[x_axis], subset[y_axis], label=iris_type)
plt.legend()
plt.show()

# sample(frac=1) draws all rows without replacement, i.e. shuffles the frame;
# reset_index(drop=True) discards the old index instead of keeping it as a column.
data_shuffle = data.sample(frac=1).reset_index(drop=True)
num_examples = data_shuffle.shape[0]
# Features: the two chosen petal columns as an (n, 2) array;
# labels: the class names as an (n, 1) column vector.
x_train = data_shuffle[[x_axis, y_axis]].values.reshape(num_examples, 2)
y_train = data_shuffle['class'].values.reshape(num_examples, 1)

# Model training: one-vs-all logistic regression over the three classes.
max_iterations = 1000
# Grid-point prediction below feeds raw (unnormalized) samples to the model,
# so training must also run on unnormalized data.
normalize_data = False
logistic_regression = LogisticRegression(x_train, y_train, normalize_data=normalize_data)
# train() presumably returns per-class parameters and per-class loss curves;
# unique_labels is the class order matching loss_histories — TODO confirm
# against logistic_regression.py.
thetas, loss_histories = logistic_regression.train(max_iterations)
labels = logistic_regression.unique_labels

# Gradient-descent loss curves, one per one-vs-all classifier.
# Looping over (history, label) pairs replaces three duplicated hard-coded
# plot calls and works for any number of classes.
for history, label in zip(loss_histories, labels):
    plt.plot(range(len(history)), history, label=label)
plt.legend()
plt.show()

# Training-set accuracy.
y_predict = logistic_regression.predict(x_train)
num_correct = np.sum(y_predict == y_train)
accuracy = num_correct / y_train.shape[0] * 100
print('Accuracy:', accuracy, '%')

# Build the decision-boundary grids.
x1_min, x1_max = np.min(x_train[:, 0]), np.max(x_train[:, 0])
x2_min, x2_max = np.min(x_train[:, 1]), np.max(x_train[:, 1])
n = 150  # grid resolution per axis
X1 = np.linspace(x1_min, x1_max, n)
X2 = np.linspace(x2_min, x2_max, n)

# One 0/1 indicator grid per class. plt.contour(X1, X2, Z) maps
# Z[row, col] to the point (x=X1[col], y=X2[row]), so rows must be
# indexed by X2 and columns by X1.
Y_SETOSA = np.zeros((n, n))
Y_VERSICOLOR = np.zeros((n, n))
Y_VIRGINICA = np.zeros((n, n))

# NOTE: grid points are deliberately NOT normalized here — a single sample
# has no mean/std of its own, so Z-score normalization is impossible, and
# the model was trained with normalize_data=False to match.
for x1_index, x1 in enumerate(X1):
    for x2_index, x2 in enumerate(X2):
        d = np.array([[x1, x2]])
        prediction = logistic_regression.predict(d)
        # BUG FIX: was [x1_index, x2_index], which transposed the grid and
        # mirrored the plotted boundary across the diagonal; the row index
        # must correspond to the X2 (y) coordinate.
        if prediction == 'SETOSA':
            Y_SETOSA[x2_index, x1_index] = 1
        elif prediction == 'VERSICOLOR':
            Y_VERSICOLOR[x2_index, x1_index] = 1
        elif prediction == 'VIRGINICA':
            Y_VIRGINICA[x2_index, x1_index] = 1

# Scatter the training samples again, colored per class.
for iris_type in iris_types:
    mask = (y_train == iris_type).flatten()
    plt.scatter(x_train[mask, 0], x_train[mask, 1], label=iris_type)

# contour() draws level curves of a 2-D field Z over the (X1, X2) grid;
# here each 0/1 indicator grid yields a curve along that class's
# decision boundary.
plt.contour(X1, X2, Y_SETOSA)
plt.contour(X1, X2, Y_VERSICOLOR)
plt.contour(X1, X2, Y_VIRGINICA)
plt.legend()
plt.show()
