import numpy as np
import matplotlib.pyplot as plt

# Load the two-feature binary-classification dataset: columns are x0, x1, label.
lines = np.loadtxt('../data/lr_dataset.csv', delimiter=',', dtype=float)
x_total = lines[:, 0:2]
y_total = lines[:, 2]

# Quick look at the raw data: red circles = class 0, blue crosses = class 1.
neg = y_total == 0
pos = y_total == 1
plt.scatter(x_total[neg, 0], x_total[neg, 1], c='r', marker='o', s=10)
plt.scatter(x_total[pos, 0], x_total[pos, 1], c='b', marker='x', s=10)
plt.show()

# Shuffle reproducibly, then split 70/30 into train and test sets.
np.random.seed(0)
ratio = 0.7
split = int(len(lines) * ratio)
idx = np.random.permutation(len(x_total))
x_total, y_total = x_total[idx], y_total[idx]
x_train, y_train = x_total[:split], y_total[:split]
x_test, y_test = x_total[split:], y_total[split:]

def logistic(z):
    """Numerically stable element-wise sigmoid 1 / (1 + exp(-z)).

    Uses the identity sigmoid(z) = (1 + tanh(z/2)) / 2 so that very
    negative z saturates cleanly to 0 instead of triggering the overflow
    RuntimeWarning that ``np.exp(-z)`` raises. Works on scalars and arrays.
    """
    return 0.5 * (1.0 + np.tanh(0.5 * z))

def acc(y_pred, y_true):
    """Return the fraction of predictions that match the true labels."""
    return np.mean(y_pred == y_true)

def auc(pred, y_true):
    """Exact ROC AUC via the rank-sum (Mann-Whitney U) statistic.

    Equals the probability that a randomly chosen positive sample is
    scored above a randomly chosen negative one; tied scores receive
    average ranks, which matches the trapezoidal ROC area.

    The previous rectangle-sum implementation dropped the area of the
    first ROC segment (its curve never started at (1, 1)) and mishandled
    tied prediction scores.

    Parameters:
        pred:   array of predicted scores/probabilities.
        y_true: array of 0/1 ground-truth labels, same length as pred.

    Returns:
        AUC in [0, 1]; 0.5 when only one class is present (undefined case).
    """
    pred = np.asarray(pred, dtype=float)
    y_true = np.asarray(y_true)
    n_pos = np.count_nonzero(y_true == 1)
    n_neg = len(y_true) - n_pos
    if n_pos == 0 or n_neg == 0:
        # AUC is undefined with a single class; 0.5 is the conventional
        # "no information" value (the old code divided by zero here).
        return 0.5
    sorted_pred = np.sort(pred)
    # Average 1-based rank of each score; ties share their mean rank.
    lo = np.searchsorted(sorted_pred, pred, side='left')
    hi = np.searchsorted(sorted_pred, pred, side='right')
    ranks = 0.5 * (lo + hi + 1)
    rank_sum = ranks[y_true == 1].sum()
    return (rank_sum - n_pos * (n_pos + 1) / 2.0) / (n_pos * n_neg)

def GD(X, X_test, y_train, y_test, num_steps, learning_rate, l2_coef):
    """Batch gradient descent for L2-regularized logistic regression.

    Parameters:
        X, X_test:          design matrices (bias column already appended).
        y_train, y_test:    0/1 label vectors for the two sets.
        num_steps:          number of full-batch gradient steps.
        learning_rate:      step size.
        l2_coef:            L2 penalty weight (note: also penalizes the
                            bias weight, matching the original behavior).

    Returns:
        (theta, train_acc, test_acc, train_auc, test_auc) where the four
        lists hold one per-step metric value each.

    Fix over the original: train metrics were computed from the
    pre-update predictions while test metrics used the post-update theta,
    so the two curves were offset by one step. Both are now evaluated
    with the same (updated) theta.
    """
    theta = np.random.normal(size=(X.shape[1],))
    train_acc, test_acc, train_auc, test_auc = [], [], [], []
    for _ in range(num_steps):
        pred = logistic(X @ theta)
        # Gradient of the negative log-likelihood plus the L2 penalty term.
        grad = -X.T @ (y_train - pred) + l2_coef * theta
        theta = theta - learning_rate * grad
        # Evaluate both sets with the SAME updated theta for comparable curves.
        pred_train = logistic(X @ theta)
        pred_test = logistic(X_test @ theta)
        train_acc.append(acc(pred_train >= 0.5, y_train))
        test_acc.append(acc(pred_test >= 0.5, y_test))
        train_auc.append(auc(pred_train, y_train))
        test_auc.append(auc(pred_test, y_test))
    return theta, train_acc, test_acc, train_auc, test_auc

# Hyper-parameters for gradient descent.
num_steps = 250
learning_rate = 0.002
l2_coef = 1.0

# Append a constant 1 column so the last weight acts as the bias term.
X = np.concatenate((x_train, np.ones((x_train.shape[0], 1))), axis=-1)
X_test = np.concatenate((x_test, np.ones((x_test.shape[0], 1))), axis=-1)

theta, train_acc, test_acc, train_auc, test_auc = GD(
    X, X_test, y_train, y_test, num_steps, learning_rate, l2_coef)

# Dump the fitted weights and the per-step metric histories.
for result in (theta, train_acc, test_acc, train_auc, test_auc):
    print(result)


steps = np.arange(num_steps)  # step indices 0, 1, ..., num_steps-1

# ====== Figure 1: AUC over training ======
plt.figure(figsize=(6, 4))
for series, colour, mark, lbl in ((train_auc, 'tab:blue', 'o', 'Train AUC'),
                                  (test_auc, 'tab:orange', 'x', 'Test  AUC')):
    plt.scatter(steps, series, color=colour, marker=mark, s=5, label=lbl)
plt.xlabel('Step')
plt.ylabel('AUC')
plt.title('AUC vs. Training Steps')
plt.legend()
plt.ylim(0.8, 0.95)
plt.grid(True)
plt.show()

# ====== Figure 2: Accuracy over training ======
plt.figure(figsize=(6, 4))
for series, colour, mark, lbl in ((train_acc, 'tab:green', 'o', 'Train Accuracy'),
                                  (test_acc, 'tab:red', 'x', 'Test  Accuracy')):
    plt.scatter(steps, series, color=colour, marker=mark, s=5, label=lbl)
plt.xlabel('Step')
plt.ylabel('Accuracy')
plt.title('Accuracy vs. Training Steps')
plt.legend()
plt.ylim(0.8, 0.95)
plt.grid(True)
plt.show()

# 1. 画全部数据散点
plt.scatter(x_total[y_total == 0, 0], x_total[y_total == 0, 1],
            c='r', marker='o', s=10, label='class 0')
plt.scatter(x_total[y_total == 1, 0], x_total[y_total == 1, 1],
            c='b', marker='x', s=10, label='class 1')

# 2. 生成决策边界的直线
x_min, x_max = x_total[:, 0].min() - 0.5, x_total[:, 0].max() + 0.5
x_line = np.array([x_min, x_max])
y_line = -(theta[0] * x_line + theta[2]) / theta[1]   # 注意 theta 顺序

# 3. 画线
plt.plot(x_line, y_line, color='green', linewidth=2, label='decision boundary')

plt.xlabel('x0')
plt.ylabel('x1')
plt.title('Logistic Regression Decision Boundary')
plt.legend()
plt.grid(True)
plt.show()
