import os
import numpy as np
import torch
import torch.nn as nn
from matplotlib import pyplot as plt


class MyLoss(nn.Module):
    """Quadratic objective f(x) = 0.5 * x^T A x with fixed A = diag(1, 10).

    Expects ``x`` shaped (2, 1); returns a (1, 1) tensor so callers can
    index the scalar as ``loss[0][0]``.
    """

    def __init__(self):
        super(MyLoss, self).__init__()
        # Build the constant curvature matrix directly as a float64 tensor
        # instead of round-tripping through a NumPy array (same dtype/values
        # as the original np.array -> torch.tensor(dtype=float) path).
        self.A = torch.tensor([[1.0, 0.0], [0.0, 10.0]], dtype=torch.float64)

    def forward(self, x):
        """Return 0.5 * x^T A x for a (2, 1) input ``x``."""
        loss = (x.T @ self.A @ x) * 0.5
        return loss



def plot_xs_figure(xs):
    """Show the optimization path *xs* on top of a filled contour plot.

    ``xs`` is an (n_steps, 2) array of iterates; the contours are those of
    the surface z = x1^2 + 10*x2^2 + 10 over [-3, 3] x [-3, 3].
    """
    grid = np.linspace(-3.0, 3.0, 100)
    X, Y = np.meshgrid(grid, grid)
    Z = X ** 2 + 10 * (Y ** 2) + 10

    fig, ax = plt.subplots(1, 1)
    filled = ax.contourf(X, Y, Z)
    fig.colorbar(filled)
    ax.set_title('Filled Contours Plot')
    ax.set_xlabel('x1')
    ax.set_ylabel('x2')

    # Debug output kept from the original: first coordinate of every
    # iterate, then the overall shape of the trajectory array.
    print(xs.transpose()[0])
    print(xs.shape)

    plt.contour(X, Y, Z)
    x1_path = xs.transpose()[0]
    x2_path = xs.transpose()[1]
    plt.plot(x1_path, x2_path)
    plt.scatter(x1_path, x2_path)
    plt.show()

def plot_objs_figure(ls_xs, ls_objs, const_xs=None,
                     const_objs=None, armijo_xs=None,
                     armijo_objs=None):
    """Plot objective values per step for up to three optimizers.

    Fix: ``const_objs`` and ``armijo_objs`` default to None but were
    previously passed unconditionally to ``len()``/``plot()``, raising a
    TypeError whenever a caller used the defaults — now each optional
    curve is drawn only when it was supplied.

    The ``*_xs`` parameters are unused here; they are kept so existing
    call sites (which pass ``ls_xs=...``) keep working.
    """
    plt.xlabel('Number of steps')
    plt.ylabel('Objective Function')
    #plt.yscale("log")
    plt.plot(range(len(ls_objs)), ls_objs,'r', label='gradient descent with linear search')
    if const_objs is not None:
        plt.plot(range(len(const_objs)), const_objs,'b', label='gradient descent with constant learning rate')
    if armijo_objs is not None:
        plt.plot(range(len(armijo_objs)), armijo_objs,'g', label='gradient descent with SGD')
    plt.legend(loc='upper right')
    plt.show()

def gradient_descent(b, A, initial_x, max_iters, gamma):
    """Minimize f(x) = 0.5 * x^T A x by steepest descent with exact line search.

    Parameters
    ----------
    b : unused (kept for interface compatibility with callers).
    A : symmetric positive-definite matrix defining the quadratic.
    initial_x : starting point.
    max_iters : maximum number of iterations.
    gamma : unused — the exact line-search step is computed each iteration
        (the original clobbered this argument; a local is used instead).

    Returns
    -------
    (objectives, xs) : objective value at each visited iterate, and the
        list of iterates including the start and the final point.
    """
    xs = [initial_x]
    objectives = []
    x = initial_x
    for n_iter in range(max_iters):
        grad = np.dot(A, x)
        obj = np.dot(np.dot(x.T, A), x) * 0.5
        objectives.append(obj)
        print("Gradient Descent({bi}/{ti}): objective={l}".format(
              bi=n_iter, ti=max_iters - 1, l=obj))

        # Exact line-search step for a quadratic: ||g||^2 / (g^T A g).
        denom = np.dot(np.dot(grad.T, A), grad)
        if denom == 0:
            # Gradient is zero: already at the minimizer. Stop instead of
            # producing a 0/0 NaN step as the original did.
            break
        step = np.dot(grad.T, grad) / denom
        x = x - step * grad
        # Record the iterate AFTER the update: the original appended before
        # updating, duplicating initial_x and never storing the final point.
        xs.append(x)

    return objectives, xs
# --- Demo script: minimize 0.5 * x^T diag(1,10) x with plain SGD, then
# --- compare against the line-search variants from the `line_search` module.
# NOTE(review): this runs at import time; consider guarding with
# `if __name__ == "__main__":` — confirm nothing imports this file first.
loss_fun = MyLoss()
# Start at (1, 1); requires_grad=True so the optimizer can update x in place.
x = torch.ones([2, 1], dtype=float, requires_grad=True)
optimizer = torch.optim.SGD([x], lr=0.04)
ls_xs = []    # visited iterates, each stored as a flat length-2 array
ls_objs = []  # objective value at each iterate (plain Python floats via numpy)
for i in range(100):
    print(np.array(x.detach()).T[0])
    # Detach before converting so the copy is outside the autograd graph.
    ls_xs.append(np.array(x.detach()).T[0])
    loss = loss_fun(x)
    # loss is a (1, 1) tensor; [0][0] extracts the scalar.
    ls_objs.append(loss.data.numpy()[0][0])
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print('loss:', loss.data.numpy()[0][0])
    print(x.data.numpy())
# Trajectory of the SGD run over the contour plot of the objective.
plot_xs_figure(np.array(ls_xs))
# Project-local module (not shown here); presumably returns per-step
# objective lists for line-search / Lipschitz-step variants — verify.
from line_search import line_search, L_search
line_objs = line_search()

L_objs = L_search()
print(line_objs)
# Note: the 'armijo' series here is actually the SGD objectives (ls_objs).
plot_objs_figure(ls_xs=np.array(ls_xs), ls_objs=np.array(line_objs), const_objs=np.array(L_objs), armijo_objs=np.array(ls_objs))