"""
Train model and eval model helpers.
"""
from __future__ import print_function

import numpy as np
import cvxopt
import cvxopt.solvers


def train_model(data, model, learning_rate=0.001, batch_size=16,
                num_steps=1000, shuffle=True):
    """Implements the training loop of stochastic gradient descent.

    Performs stochastic gradient descent with the indicated batch_size.

    If shuffle is true:
        Shuffle data at every epoch, including the 0th epoch.

    If the number of examples is not divisible by batch_size, the last batch
    will simply be the remaining examples.

    Args:
        data(dict): Data loaded from io_tools; expects keys 'image' (N, ndims)
            and 'label' (N, 1) -- TODO confirm shapes against io_tools.
        model(LinearModel): Initialized linear model.
        learning_rate(float): Learning rate of your choice.
        batch_size(int): Batch size of your choice.
        num_steps(int): Number of update steps to run.
        shuffle(bool): Whether to shuffle data at every epoch.

    Returns:
        model(LinearModel): Returns a trained model.
    """
    x = data['image']
    y = data['label']
    n = x.shape[0]

    def _reshuffle(x, y):
        # Permute examples and labels with the same random order.
        indices = np.random.permutation(n)
        return x[indices], y[indices]

    # Shuffle data in the 0th epoch as well.
    if shuffle:
        x, y = _reshuffle(x, y)

    # Precompute [start, end) index pairs for every batch. The final batch
    # is clipped to n, so it holds the remainder when n % batch_size != 0.
    start_id = np.arange(0, n, batch_size)
    end_id = np.minimum(start_id + batch_size, n)
    num_batches = len(start_id)

    # Perform stochastic gradient descent.
    current_batch = 0
    for step in range(num_steps):
        x_batch = x[start_id[current_batch]:end_id[current_batch]]
        y_batch = y[start_id[current_batch]:end_id[current_batch]]
        update_step(x_batch, y_batch, model, learning_rate)
        current_batch += 1
        # Epoch boundary: optionally reshuffle, then restart from batch 0.
        if current_batch == num_batches:
            if shuffle:
                x, y = _reshuffle(x, y)
            current_batch = 0

    # BUG FIX: the original left this return commented out, so callers
    # received None instead of the trained model promised by the docstring.
    return model


def update_step(x_batch, y_batch, model, learning_rate):
    """Performs one single update step (i.e. forward then backward).

    Runs the model's forward pass on the batch, obtains the gradient from
    the backward pass, and applies one gradient-descent step to model.w.

    Args:
        x_batch(numpy.ndarray): input data of dimension (N, ndims).
        y_batch(numpy.ndarray): label data of dimension (N, 1).
        model(LinearModel): Initialized linear model.
        learning_rate(float): Step size of the gradient update.
    """
    forward_out = model.forward(x_batch)
    gradient = model.backward(forward_out, y_batch)
    model.w = model.w - learning_rate * gradient


def train_model_qp(data, model):
    """Computes and sets the optimal model weights (model.w) using a QP solver.

    Builds the quadratic program via qp_helper, converts the numpy arrays
    to cvxopt double matrices, solves, and copies the [w; b] portion of the
    solution into model.w.

    Args:
        data(dict): Data from utils.data_tools.preprocess_data; expects
            'image' of shape (n, k) -- TODO confirm against preprocess_data.
        model(SupportVectorMachine): Support vector machine model.
    """
    P, q, G, h = qp_helper(data, model)
    # cvxopt requires its own dense 'd' (double) matrix type.
    P = cvxopt.matrix(P, P.shape, 'd')
    q = cvxopt.matrix(q, q.shape, 'd')
    G = cvxopt.matrix(G, G.shape, 'd')
    h = cvxopt.matrix(h, h.shape, 'd')
    sol = cvxopt.solvers.qp(P, q, G, h)
    # z stacks the decision variables; per qp_helper the first k+1 entries
    # are the weights plus bias, the remaining n are slack variables.
    z = np.array(sol['x'])
    # Implementation here (do not modify the code above)
    
    # Set model.w to the [weights; bias] block, discarding the slacks.
    k = data['image'].shape[1]
    model.w = z[:k+1]


def qp_helper(data, model):
    """Prepares arguments for the qpsolver.

    The decision vector is z = [w; b; xi] with k weights, one bias, and n
    slack variables. The program encodes the soft-margin SVM primal:
    margin constraints -y_i * (w.x_i + b) - xi_i <= -1 and slack
    nonnegativity -xi_i <= 0.

    Args:
        data(dict): Data from utils.data_tools.preprocess_data.
        model(SupportVectorMachine): Support vector machine model.

    Returns:
        P(numpy.ndarray): P matrix in the qp program. (k+1+n, k+1+n)
        q(numpy.ndarray): q matrix in the qp program. (k+1+n, 1)
        G(numpy.ndarray): G matrix in the qp program. (2n, k+1+n)
        h(numpy.ndarray): h matrix in the qp program. (2n, 1)
    """
    x = data['image']
    y = data['label']
    n, k = x.shape

    # P: weight-decay penalty on the [w; b] block only; slacks unpenalized.
    P = np.zeros((k + 1 + n, k + 1 + n))
    P[:k + 1, :k + 1] = model.w_decay_factor * np.identity(k + 1)

    # q: unit cost on every slack variable, zero cost on [w; b].
    q = np.concatenate((np.zeros((k + 1, 1)), np.ones((n, 1))), axis=0)

    # Augment x with a column of ones so the bias rides along with w.
    x_aug = np.concatenate((x, np.ones((n, 1))), axis=1)
    neg_eye = -np.identity(n)

    # Upper block of G (n, k+1+n): the margin constraints.
    G_margin = np.concatenate((-y * x_aug, neg_eye), axis=1)
    # Lower block of G (n, k+1+n): slack nonnegativity.
    G_slack = np.concatenate((np.zeros((n, k + 1)), neg_eye), axis=1)
    G = np.concatenate((G_margin, G_slack), axis=0)

    # h: -1 bounds for the margin rows, 0 bounds for the slack rows.
    h = np.concatenate((-np.ones((n, 1)), np.zeros((n, 1))), axis=0)

    return P, q, G, h


def eval_model(data, model):
    """Performs evaluation on a dataset.

    Args:
        data(dict): Data loaded from io_tools; expects keys 'image' and
            'label' -- TODO confirm shapes against io_tools.
        model(LinearModel): Initialized linear model.

    Returns:
        loss(float): model loss on data.
        acc(float): model accuracy on data.
    """
    x = data['image']
    y = data['label']

    # Loss over the full dataset.
    f = model.forward(x)
    loss = model.total_loss(f, y)

    # Accuracy: fraction of predictions that match the labels.
    # NOTE(review): assumes model.predict returns an array with the same
    # shape as y so the elementwise comparison does not broadcast -- confirm.
    y_predict = model.predict(f)
    correct_count = np.sum(y_predict == y)
    # BUG FIX: force true division. The file imports print_function from
    # __future__ (Python 2/3 compat) but not division, so int / int would
    # floor to 0 on Python 2.
    acc = float(correct_count) / len(y_predict)

    return loss, acc
