import numpy as np
import matplotlib.pyplot as plt
from python_ai.ML.lin_regression.gradient_descent.xlib import *
from python_ai.ML.logic_regression.xlib_logic_regression import *
from python_ai.common.xcommon import sep

# One wide figure holding a spr x spc grid of subplots
# (raw data, scaled data, cost history — filled in by gd_on_data2d).
plt.figure(figsize=[16, 5])
spr = 1  # subplot grid: number of rows (used in plt.subplot(spr, spc, spn))
spc = 3  # subplot grid: number of columns


def gd_on_data2d(data_filename, spn=0):
    """Fit a logistic regression to a 2-feature CSV dataset and plot the result.

    Loads the data, scatter-plots it raw and feature-scaled, runs gradient
    descent on the scaled features, draws the decision boundary on both the
    scaled and the original plots, and finally plots the cost history.

    Args:
        data_filename: path to a comma-separated file whose columns are
            x1, x2, y (y being the 0/1 class label).
        spn: starting subplot index offset within the module-level
            spr x spc grid (default 0).
    """

    m = 0  # number of samples; set by process_data

    def process_data(data, label='data'):
        """Split data into x1/x2/y columns and scatter-plot the two classes.

        Returns the subplot axes plus the (m, 1) column vectors x1, x2, y.
        """
        nonlocal m, spn
        m = len(data)
        x1 = data[:, 0].reshape(m, 1)
        x2 = data[:, 1].reshape(m, 1)
        y = data[:, 2].reshape(m, 1)

        # boolean masks selecting the positive (y == 1) and negative samples
        y_selector1 = (y == 1)[:, 0]
        plt_data_1_x1 = x1[y_selector1, :].ravel()
        plt_data_1_x2 = x2[y_selector1, :].ravel()

        y_selector0 = np.invert(y_selector1)
        plt_data_0_x1 = x1[y_selector0, :].ravel()
        plt_data_0_x2 = x2[y_selector0, :].ravel()

        # plot dots: one scatter series per class
        spn += 1
        pltx = plt.subplot(spr, spc, spn)
        pltx.scatter(plt_data_1_x1, plt_data_1_x2, s=1, label=label + '_y1')
        pltx.scatter(plt_data_0_x1, plt_data_0_x2, s=1, label=label + '_y0')

        return pltx, x1, x2, y

    # load data
    data_loaded = np.loadtxt(data_filename, delimiter=',')
    plt1, x1_loaded, x2_loaded, y = process_data(data_loaded, 'loaded')

    # scale the feature columns, then re-attach the untouched label column
    sep('scaling ...')
    features_loaded = data_loaded[:, 0:-1]
    data_scaled, mu, sigma = scale_feature_data(features_loaded)
    data_scaled = np.c_[data_scaled, data_loaded[:, -1]]
    plt2, x1_scaled, x2_scaled, y = process_data(data_scaled, 'scaled')

    # gradient descent on the [1, x1, x2] design matrix
    X = np.c_[np.ones([m, 1]), x1_scaled, x2_scaled]
    num_iters_ori = num_iters = 1000
    alpha = 1e-8
    theta, J_history = gradient_descent_algorithm_logic_regression(X, y, alpha=alpha, num_iters=num_iters)
    # a shorter-than-requested cost history means the optimizer stopped early,
    # which we take as convergence
    num_iters = len(J_history)
    if num_iters < num_iters_ori:
        print('CONVERGED!')
    else:
        print('NOT CONVERGED!!!')
    print(f'THETA = {theta}')

    # test performance
    matchCount, accuracy, predictY = testLogRegres(X, y, theta)
    print(f'matchCount, accuracy = {matchCount, accuracy}')
    matchCount, accuracy, predictY = testLogRegres_my_better(X, y, theta)
    print(f'my better: matchCount, accuracy = {matchCount, accuracy}')

    # plot the decision boundary on the scaled data
    plt_theta = transform_theta_of_logic_regression(theta)
    print(f'PLT THETA = {plt_theta}')
    # BUGFIX: the boundary's x-range must span x1 only; the original mixed
    # x1_scaled.min() with x2_scaled.max() (cf. the unscaled branch below,
    # which correctly uses x1_loaded for both endpoints)
    plt_line_x = np.array([x1_scaled.min(), x1_scaled.max()])
    plt_line_x_len = len(plt_line_x)
    plt_line_y = np.c_[np.ones([plt_line_x_len, 1]), plt_line_x.reshape(plt_line_x_len, 1)].dot(plt_theta)
    plt2.plot(plt_line_x, plt_line_y, 'r-')

    # unscale the scaled data back
    print('Unscaling ...')

    # plot the same boundary on the original (unscaled) data
    plt_theta_unscaled = scale_theta_back(x1_loaded, x2_loaded, plt_theta)
    print(f'PLT THETA UNSCALED = {plt_theta_unscaled}')
    plt_line_x = np.array([x1_loaded.min(), x1_loaded.max()])
    plt_line_x_len = len(plt_line_x)
    plt_line_y = np.c_[np.ones([plt_line_x_len, 1]), plt_line_x.reshape(plt_line_x_len, 1)].dot(plt_theta_unscaled)
    plt1.plot(plt_line_x, plt_line_y, 'r-')

    plt1.legend()
    plt2.legend()

    # the condition of cost function values in the iterations
    spn += 1
    plt.subplot(spr, spc, spn)
    plt.plot(J_history)


# Run the demo on the bundled 2-feature dataset (relative to the CWD),
# then display all three subplots.
gd_on_data2d(r'./data/ex2data1.txt')

plt.show()
