import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import scipy.io
import math

import opt_utils
import testCase

def update_parameters_with_grads(parameters, grads, learning_rate):
    """Apply one plain gradient-descent step to every layer's parameters.

    parameters: dict with keys "W1".."WL" and "b1".."bL" (numpy arrays).
    grads: dict with matching gradients under "dW1".."dWL", "db1".."dbL".
    learning_rate: step size.

    Returns the parameters dict (updated in place and returned).
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]

    return parameters

def random_mini_batch(X, Y, mini_batch_size = 64, seed = 0):
    """Split (X, Y) into a list of randomly shuffled mini-batches.

    X: input data of shape (n_x, m).
    Y: labels of shape (1, m) (columns are examples, same order as X).
    mini_batch_size: number of examples per full mini-batch.
    seed: RNG seed; default 0 keeps the old behavior (same permutation
          every call). Pass a per-epoch value to get fresh shuffles.

    Returns a list of (mini_batch_X, mini_batch_Y) tuples. If m is not a
    multiple of mini_batch_size, the final tuple holds the remaining
    m % mini_batch_size examples.
    """
    np.random.seed(seed)
    m = X.shape[1]
    mini_batches = []

    # One shared column permutation keeps X and Y aligned.
    permutation = np.random.permutation(np.arange(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation]

    num_complete = math.floor(m / mini_batch_size)
    for k in range(num_complete):
        mini_batch_X = shuffled_X[:, k*mini_batch_size:(k+1)*mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k*mini_batch_size:(k+1)*mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Bug fix: this block used to live INSIDE the loop above with the
    # condition `m / mini_batch_size != 0` (true division — true for any
    # m > 0), so the leftover batch was appended once per full batch.
    # Append the incomplete tail exactly once, only when there is one.
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete*mini_batch_size:]
        mini_batch_Y = shuffled_Y[:, num_complete*mini_batch_size:]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches

def initialize_velocity(parameters):
    """Build zero-filled momentum buffers matching each W/b parameter.

    parameters: dict with keys "W1".."WL", "b1".."bL".
    Returns a dict V with keys "dW1".."dWL", "db1".."dbL", each a zero
    array shaped like the corresponding parameter.
    """
    V = {}
    num_layers = len(parameters) // 2

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            V["d" + prefix + str(layer)] = np.zeros_like(parameters[prefix + str(layer)])

    return V

def update_parameters_momentum(parameters, grads, V, beta, learning_rate):
    """One gradient-descent-with-momentum step.

    Updates V as an exponentially weighted average of the gradients,
    then moves each parameter against its velocity:
        V = beta * V + (1 - beta) * grad
        param = param - learning_rate * V

    Returns (parameters, V); both dicts are updated in place.
    """
    num_layers = len(parameters) // 2

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            p_key = prefix + str(layer)
            g_key = "d" + p_key
            V[g_key] = beta * V[g_key] + (1 - beta) * grads[g_key]
            parameters[p_key] = parameters[p_key] - learning_rate * V[g_key]

    return parameters, V

def initialize_adam(parameters):
    """Build zero-filled Adam moment buffers for every W/b parameter.

    parameters: dict with keys "W1".."WL", "b1".."bL".
    Returns (V, S): first-moment and second-moment dicts keyed
    "dW1".."dWL", "db1".."dbL", each zero-shaped like its parameter.
    """
    V, S = {}, {}
    num_layers = len(parameters) // 2

    for layer in range(1, num_layers + 1):
        for prefix in ("W", "b"):
            template = parameters[prefix + str(layer)]
            key = "d" + prefix + str(layer)
            V[key] = np.zeros_like(template)
            S[key] = np.zeros_like(template)

    return V, S

def update_parameters_adam(parameters, grads, V, S, t, learning_rate = 0.01, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """One Adam update step.

    parameters/grads: per-layer weights and gradients ("W*/b*", "dW*/db*").
    V, S: first and second moment estimates (updated in place).
    t: 1-based timestep, used for bias correction.
    learning_rate, beta1, beta2, epsilon: Adam hyperparameters.

    Returns (parameters, V, S).

    Bug fix: the parameter update previously used the raw first moment
    V instead of the bias-corrected V_correct that was computed right
    above it, which under-scales the update during early steps.
    """
    L = len(parameters) // 2
    V_correct = {}
    S_correct = {}

    for i in range(1, L+1):
        for prefix in ("W", "b"):
            g_key = "d" + prefix + str(i)
            p_key = prefix + str(i)

            # Exponentially weighted moving averages of grad and grad^2.
            V[g_key] = beta1 * V[g_key] + (1 - beta1) * grads[g_key]
            S[g_key] = beta2 * S[g_key] + (1 - beta2) * np.square(grads[g_key])

            # Bias correction compensates for zero-initialized moments.
            V_correct[g_key] = V[g_key] / (1 - np.power(beta1, t))
            S_correct[g_key] = S[g_key] / (1 - np.power(beta2, t))

            # Update with the CORRECTED first moment (was raw V before).
            parameters[p_key] = parameters[p_key] - learning_rate * V_correct[g_key] / (np.sqrt(S_correct[g_key]) + epsilon)

    return parameters, V, S

# Load the course training set via the helper module (is_plot=False
# suppresses the helper's plot). Shapes presumably (n_x, m) / (1, m),
# matching how the functions below index columns — TODO confirm in opt_utils.
train_X, train_Y = opt_utils.load_dataset(is_plot=False)

def model(X, Y, layer_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True, is_plot = True):
    """Train a feed-forward network with the chosen optimizer.

    X: input data, shape (n_x, m); Y: labels, shape (1, m).
    layer_dims: layer sizes, e.g. [n_x, 5, 2, 1].
    optimizer: "gd", "momentum", or "adam".
    beta: momentum coefficient (used only by "momentum").
    beta1, beta2, epsilon: Adam hyperparameters (used only by "adam").
    Returns the trained parameters dict.
    Raises ValueError for an unknown optimizer name.
    """
    costs = []
    t = 0       # Adam timestep: incremented once per mini-batch update

    parameters = opt_utils.initialize_parameters(layer_dims)

    if optimizer == "gd":
        pass    # plain gradient descent needs no extra state
    elif optimizer == "momentum":
        V = initialize_velocity(parameters)
    elif optimizer == "adam":
        V, S = initialize_adam(parameters)
    else:
        # Bug fix: this used to print "error" and keep going, which then
        # crashed later with a NameError on V/S. Fail fast instead.
        raise ValueError("unknown optimizer: " + str(optimizer))

    for i in range(num_epochs):
        # NOTE(review): random_mini_batch reseeds with a fixed value, so
        # every epoch re-uses the same shuffle; a per-epoch seed variable
        # used to be incremented here but was never passed through.
        mini_batches = random_mini_batch(X, Y, mini_batch_size)

        for mini_batch in mini_batches:
            (mini_batch_X, mini_batch_Y) = mini_batch

            # Forward pass, cost, backward pass via the course helpers.
            A3, cache = opt_utils.forward_propagation(mini_batch_X, parameters)
            cost = opt_utils.compute_cost(A3, mini_batch_Y)
            grads = opt_utils.backward_propagation(mini_batch_X, mini_batch_Y, cache)

            if optimizer == "gd":
                parameters = update_parameters_with_grads(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, V = update_parameters_momentum(parameters, grads, V, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1
                parameters, V, S = update_parameters_adam(parameters, grads, V, S, t, learning_rate, beta1, beta2, epsilon)

        if i % 100 == 0:
            costs.append(cost)  # cost of the epoch's last mini-batch

        if print_cost and i % 1000 == 0:
            print(cost)

    if is_plot:
        plt.plot(costs)
        plt.show()

    return parameters

layers_dims = [train_X.shape[0],5,2,1]
# parameters = model(train_X, train_Y, layers_dims, optimizer="gd",is_plot=True)

# Train with the Adam optimizer (the original comment said "momentum",
# but optimizer="adam" is what actually runs; beta is ignored for adam).
parameters = model(train_X, train_Y, layers_dims, optimizer="adam", beta=0.9, is_plot=True)

# Bug fix: variable-name typo ("preditions") and a plot title that
# claimed plain gradient descent although Adam is used above.
predictions = opt_utils.predict(train_X,train_Y,parameters)
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5, 2.5])
axes.set_ylim([-1, 1.5])
opt_utils.plot_decision_boundary(lambda x: opt_utils.predict_dec(parameters, x.T), train_X, train_Y)