import numpy as np
import matplotlib.pyplot as plt

# Fix the RNG so the shuffle below is reproducible run-to-run.
np.random.seed(1)

# One 2x2 figure: plt1 = training scatter + boundary, plt2 = cost curve,
# plt3 = test scatter + boundary (fourth cell unused).
plt.figure(figsize=[12, 9])
spr, spc = 2, 2  # subplot grid: rows, columns
spn = 1
plt1 = plt.subplot(spr, spc, spn)
spn += 1
plt2 = plt.subplot(spr, spc, spn)
spn += 1
plt3 = plt.subplot(spr, spc, spn)

# Load the exam-scores dataset: two feature columns and a 0/1 label column.
# NOTE(review): path is relative to the working directory — confirm layout.
data_loaded = np.loadtxt(r'../data/ex2data1.txt', delimiter=',')
m = len(data_loaded)  # number of samples
print(m)
print(data_loaded[:5])

# scale
# Standardise the features to zero mean / unit variance (z-score).
x = data_loaded[:, :-1]  # view into data_loaded, not a copy
y = data_loaded[:, -1]
mu = np.mean(x, axis=0)
sigma = np.std(x, axis=0)
# In-place ops: these also mutate the feature columns of data_loaded,
# since x is a view — data_loaded is rebuilt below anyway.
x -= mu
x /= sigma
data_loaded = np.c_[x, y.reshape(m, 1)]

# shuffle
# Random row order so the 70/30 split below is not biased by file order.
rnd_idx = np.random.permutation(m)
data_loaded = data_loaded[rnd_idx]

# split and plot
# 70% of the shuffled rows become the training set, the rest the test set.
split_idx = int(m * 0.7)
data_train, data_test = np.split(data_loaded, [split_idx])

x1_train = data_train[:, 0]
x2_train = data_train[:, 1]
y_train = data_train[:, -1]
# Training scatter on its dedicated axes, coloured by class label.
plt1.scatter(x1_train, x2_train, c=y_train)

x1_test = data_test[:, 0]
x2_test = data_test[:, 1]
y_test = data_test[:, -1]
# Test scatter: draw explicitly on plt3 (the test axes). The original used
# plt.scatter, which targets whichever axes happens to be "current" — it only
# worked because plt3 was the last subplot created. Boolean masks select each
# class so the two groups get distinct colours.
plt3.scatter(x1_test[y_test == 0], x2_test[y_test == 0], c='b')
plt3.scatter(x1_test[y_test == 1], x2_test[y_test == 1], c='y')


def model(X, theta):
    """Linear logit X @ theta, fed to the sigmoid by callers."""
    return np.dot(X, theta)


def sigmoid(z):
    """Elementwise logistic function 1 / (1 + e^-z).

    The input is clipped to [-500, 500] first: for very negative z the
    original np.exp(-z) overflows float64 (exp of ~710+) and emits a
    RuntimeWarning. Clipping leaves results unchanged for any |z| inside
    the clip range and saturates cleanly to 0.0 / 1.0 outside it.
    """
    z = np.clip(z, -500, 500)
    return 1.0 / (1.0 + np.exp(-z))


def cost(h, y):
    """Mean binary cross-entropy of predictions h against 0/1 labels y.

    h is clipped away from exactly 0 and 1 so np.log never yields
    -inf (and 0 * -inf never yields nan) when a prediction saturates;
    for unsaturated h the value is unchanged up to eps = 1e-15.
    """
    m = len(h)
    eps = 1e-15  # smallest safe distance from the log singularities
    h = np.clip(h, eps, 1 - eps)
    return -1.0 / m * np.sum((y * np.log(h) + (1 - y) * np.log(1 - h)))


def grad(x, y, alpha=0.01, num_iter=5000):
    """Fit logistic-regression weights by batch gradient descent.

    x is the design matrix (bias column included by the caller), y the
    0/1 labels. Returns (h, theta, J): the final predicted probabilities,
    the fitted weight vector, and the per-iteration cost history.
    """
    n_samples, n_features = x.shape
    theta = np.zeros(n_features)
    J = np.zeros(num_iter)
    for it in range(num_iter):
        h = sigmoid(model(x, theta))
        J[it] = cost(h, y)  # track convergence
        # Mean gradient of the cross-entropy w.r.t. theta.
        residual = h - y
        theta -= alpha * (1 / n_samples * x.T.dot(residual))
    return h, theta, J


def score(h, y):
    """Accuracy of probabilities h thresholded at 0.5 against labels y."""
    predicted = h > 0.5
    return np.mean(predicted == y)


# gradient descent
# Train on the standardised features with a bias column of ones prepended.
h_train, theta, J = grad(np.c_[np.ones(split_idx), x1_train, x2_train], y_train)
print(f'theta = {theta}')
# Cost history: should decrease toward a plateau if alpha is well chosen.
plt2.plot(J)

# score
print(f'train score = {score(h_train, y_train)}')
# Same bias-augmented design matrix for the held-out test rows.
h_test = sigmoid(model(np.c_[np.ones(m - split_idx), x1_test, x2_test], theta))
print(f'test score = {score(h_test, y_test)}')


def get_plt_line_y(plt_x, logTheta):
    """y-values of the decision boundary t0 + t1*x + t2*y = 0 at plt_x."""
    intercept = logTheta[0]
    w1 = logTheta[1]
    w2 = logTheta[2]
    return -1.0 / w2 * (intercept + plt_x * w1)


# Decision boundary over the training scatter. Features were standardised
# before training, so theta already lives in the scaled coordinate system
# these axes use.
line_x_train = np.array([x1_train.min(), x1_train.max()])
line_y_train = get_plt_line_y(line_x_train, theta)
plt1.plot(line_x_train, line_y_train)

# Same boundary drawn over the test scatter's x-range.
line_x_test = np.array([x1_test.min(), x1_test.max()])
line_y_test = get_plt_line_y(line_x_test, theta)
plt3.plot(line_x_test, line_y_test)

plt.show()
