import datetime

import numpy as np

from helpers import *

# Quadratic problem data: diagonal A (eigenvalues 1 and 10, condition number 10).
A = np.array(([1, 0], [0, 10]))
# Zero target vector; with b = 0 the least-squares objective reduces to
# 0.5 * x^T A x (up to the 1/N scaling), which is what the loops below compute.
b = np.array([0, 0])
def calculate_L(b, A):
    """Calculate the smoothness constant for f.

    For the least-squares objective f(x) = ||Ax - b||^2 / (2N) the Hessian
    is A^T A / N, so the smoothness (Lipschitz-gradient) constant is the
    spectral norm of A^T A divided by N = len(b).

    NOTE(review): the descent loops in this file minimize 0.5 * x^T A x
    (no 1/N factor) — confirm which objective the constant is meant for.

    Args:
        b: target vector of length N (only its length is used here).
        A: data matrix.

    Returns:
        L: smoothness constant of f.
    """
    # ||A^T A||_2 is the largest eigenvalue of A^T A (ord=2 -> spectral norm).
    ATA = np.dot(A.T, A)
    norm_ATA = np.linalg.norm(ATA, 2)
    return norm_ATA / len(b)

def gradient_descent(b, A, initial_x, max_iters, gamma):
    """Gradient descent algorithm."""
    # Record the whole iterate trajectory plus one objective value per step.
    xs = [initial_x]
    objectives = []
    w = initial_x
    for n_iter in range(max_iters):
        # Gradient and objective of f(w) = 0.5 * w^T A w.
        gradient = A.dot(w)
        loss = 0.5 * w.T.dot(A).dot(w)
        # Exact line search for a quadratic: gamma = g^T g / (g^T A g)
        # (the incoming gamma argument is intentionally replaced each step).
        gamma = gradient.dot(gradient) / gradient.dot(A).dot(gradient)
        w = w - gamma * gradient
        # store x and objective function value
        xs.append(w)
        objectives.append(loss)
        print("Gradient Descent({bi}/{ti}): objective={l}".format(
              bi=n_iter, ti=max_iters - 1, l=loss))

    return objectives, xs

# from gradient_descent import *
from plots import gradient_descent_visualization

def gradient_descent_2(b, A, initial_x, max_iters, gamma):
    """Gradient descent algorithm."""
    # Keep every iterate and the objective evaluated at each one.
    trajectory = [initial_x]
    objectives = []
    w = initial_x
    for iteration in range(max_iters):
        # Gradient and objective of f(w) = 0.5 * w^T A w; here the step
        # size gamma stays fixed for every iteration (no line search).
        gradient = A.dot(w)
        loss = 0.5 * w.T.dot(A).dot(w)
        w = w - gamma * gradient
        # store x and objective function value
        trajectory.append(w)
        objectives.append(loss)
        print("Gradient Descent({bi}/{ti}): objective={l}".format(
              bi=iteration, ti=max_iters - 1, l=loss))

    return objectives, trajectory

# Define the parameters of the algorithm.
# Number of gradient-descent iterations for both variants below.
max_iters = 100

# Fixed step size; only gradient_descent_2 actually uses it (the line-search
# variant overwrites gamma each iteration).
gamma = 0.1

# Initialization
# Start from the all-ones vector, matched to A's column dimension.
x_initial = np.ones(A.shape[1])

# Start gradient descent.
# Timestamp taken before the runs; NOTE(review): no matching end-time /
# elapsed computation is visible in this chunk.
start_time = datetime.datetime.now()

def line_search():
    """Run the exact-line-search variant and return only the objective values."""
    objectives, _ = gradient_descent(b, A, x_initial, max_iters, gamma)
    return objectives

def L_search():
    """Run the fixed-step variant and return only the objective values."""
    objectives, _ = gradient_descent_2(b, A, x_initial, max_iters, gamma)
    return objectives