import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf

# --- figure layout: a 2x2 grid; top row holds the data scatters,
# bottom-left holds the cost-vs-iteration curve ---
np.random.seed(1)
plt.figure(figsize=[12, 9])
spr, spc = 2, 2  # subplot grid: rows, columns
spn = 1
plt1 = plt.subplot(spr, spc, spn)  # training-set scatter
plt.title('Training Set')
spn += 1
plt2 = plt.subplot(spr, spc, spn)  # testing-set scatter
plt.title('Testing Set')
spn += 1
plt3 = plt.subplot(spr, spc, spn)  # cost curve

# --- load the raw data set ---
# Each row of ex2data1.txt is: feature1, feature2, label (0 or 1).
data_loaded = np.loadtxt(r'../../../ML/logic_regression/data/ex2data1.txt', delimiter=',')
m = len(data_loaded)  # number of samples
print(m)
print(data_loaded[:5])

# scale: standardize each feature column (zero mean, unit variance)
# so gradient descent converges at a single learning rate
x = data_loaded[:, :-1]
_, n = x.shape  # n = number of features
y = data_loaded[:, -1]
mu = np.mean(x, axis=0)
sigma = np.std(x, axis=0)
sigma[sigma == 0] = 1.0  # guard: a constant feature would otherwise divide by zero
# build a new array instead of `x -= mu` in place: x is a view into
# data_loaded, and in-place ops would silently mutate the original rows
x = (x - mu) / sigma
data_loaded = np.c_[x, y.reshape(m, 1)]

# shuffle the rows so the later 70/30 split is a random partition;
# the fixed seed keeps the split reproducible between runs
np.random.seed(666)
shuffle_order = np.random.permutation(m)
data_loaded = data_loaded[shuffle_order]

# --- split 70% train / 30% test, then scatter-plot both sets ---
split_idx = int(m * 0.7)
data_train, data_test = np.split(data_loaded, [split_idx])

# training data set (x_train/y_train feed the optimizer later;
# x1_train/x2_train are kept for plotting the decision boundary)
x_train = data_train[:, :-1]
x1_train = data_train[:, 0]
x2_train = data_train[:, 1]
y_train = data_train[:, -1]
plt1.scatter(x1_train, x2_train, c=y_train)  # color points by class label

# testing data set
x_test = data_test[:, :-1]
x1_test = data_test[:, 0]
x2_test = data_test[:, 1]
y_test = data_test[:, -1]
plt2.scatter(data_test[y_test == 0, 0], data_test[y_test == 0, 1], c='b')  # negative class
plt2.scatter(data_test[y_test == 1, 0], data_test[y_test == 1, 1], c='y')  # positive class

# --- logistic regression as a TF1-style static graph ---
# tf.compat.v1.placeholder only works in graph mode; under TensorFlow 2 the
# default is eager execution and creating a placeholder raises RuntimeError,
# so switch eager off before building any ops.
tf.compat.v1.disable_eager_execution()

ph_x = tf.compat.v1.placeholder(tf.float32, [None, n], 'placeholder_x')
ph_y = tf.compat.v1.placeholder(tf.float32, [None, 1], 'placeholder_y')
print(ph_x)
print(ph_y)

# model parameters: one weight per feature plus a scalar bias
w = tf.Variable(tf.random.normal([n, 1]), name='w')
b = tf.Variable(tf.random.normal([1]), name='b')
print(w)
print(b)

# hypothesis: P(y = 1 | x) = sigmoid(x @ w + b)
h = tf.sigmoid(tf.matmul(ph_x, w) + b, name='h')
print(h)

# hard 0/1 prediction at the conventional 0.5 threshold
predict = tf.cast(h > 0.5, dtype=tf.float32, name='predict')
print(predict)

acc_score = tf.reduce_mean(
    tf.cast(
        tf.equal(
            h > 0.5,
            ph_y > 0.5  # ATTENTION Compare float numbers by == is not stable.
        ),
        dtype=tf.float32
    ),
    name='acc_score'
)
print(acc_score)

# cross-entropy cost. Clip h away from exactly 0/1 first: float32 sigmoid can
# saturate, and log(0) = -inf would poison the cost and gradients with NaNs.
# (The unclipped `h` tensor itself is unchanged for prediction/accuracy.)
eps = 1e-7
h_safe = tf.clip_by_value(h, eps, 1.0 - eps)
cost = tf.math.negative(
    tf.reduce_mean(ph_y * tf.math.log(h_safe) + (1 - ph_y) * tf.math.log(1 - h_safe)),
    name='cost'
)
print(cost)

# plain gradient descent; `train` is the op that applies one update step
train = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.01)\
    .minimize(cost)

with tf.compat.v1.Session() as sess:
    # reshape labels into column vectors so they match ph_y's [None, 1] shape
    y_train = y_train.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)
    sess.run(tf.compat.v1.global_variables_initializer())
    iters = 1500
    cost_arr = np.zeros(iters)  # cost per iteration, for the learning-curve plot
    group = iters // 10         # print progress every `group` iterations
    for i in range(iters):
        # one gradient-descent step; also fetch current params/cost/accuracy
        wv, bv, cost_v, _, acc = sess.run([w, b, cost, train, acc_score], feed_dict={ph_x: x_train, ph_y: y_train})
        cost_arr[i] = cost_v
        if i % group == 0:
            print(f'#{i + 1}: cost = {cost_v}, acc={acc}, w.T = {np.transpose(wv)}, b = {bv}')
    # report the final iteration if it was not already printed by the cadence above
    # (relies on `i` keeping its last loop value)
    if i % group != 0:
        print(f'#{i + 1}: cost = {cost_v}, acc={acc}, w.T = {np.transpose(wv)}, b = {bv}')
    h_train, s_train = sess.run([h, acc_score], feed_dict={ph_x: x_train, ph_y: y_train})
    print(f'Training accuracy score = {s_train}')
    h_test, s_test = sess.run([h, acc_score], feed_dict={ph_x: x_test, ph_y: y_test})
    print(f'Testing accuracy score = {s_test}')


    def get_y_from_x(x1, w, b):
        """Return x2 on the decision boundary w1*x1 + w2*x2 + b = 0 for the given x1.

        Here `w` and `b` are the fetched NumPy values (they shadow the
        graph tensors of the same name).
        """
        w = w.ravel()
        return - (b + w[0] * x1) / w[1]


    # draw the learned decision boundary over each scatter; two end points
    # suffice because the boundary is a straight line
    plt1_x = np.array([x1_train.min(), x1_train.max()])
    plt1_y = get_y_from_x(plt1_x, wv, bv)
    plt1.plot(plt1_x, plt1_y, 'r-')

    plt2_x = np.array([x1_test.min(), x1_test.max()])
    plt2_y = get_y_from_x(plt2_x, wv, bv)
    plt2.plot(plt2_x, plt2_y, 'r-')

    # keep the axes consistent between the training and testing subplots:
    # take the union of the boundary-line range and the data range
    ymin11, ymax11 = plt1_y.min(), plt1_y.max()
    ymin12, ymax12 = x2_train.min(), x2_train.max()
    ymin21, ymax21 = plt2_y.min(), plt2_y.max()
    ymin22, ymax22 = x2_test.min(), x2_test.max()
    ymin = np.min([ymin11, ymin12, ymin21, ymin22])
    ymax = np.max([ymax11, ymax12, ymax21, ymax22])
    plt1.set_ylim([ymin, ymax])
    plt2.set_ylim([ymin, ymax])
    xmin1, xmax1 = plt1_x.min(), plt1_x.max()
    xmin2, xmax2 = plt2_x.min(), plt2_x.max()
    xmin = np.min([xmin1, xmin2])
    xmax = np.max([xmax1, xmax2])
    plt1.set_xlim([xmin, xmax])
    plt2.set_xlim([xmin, xmax])

    plt3.plot(cost_arr, label='cost function value')
    plt3.legend()

    # show all plotting
    plt.show()
