from tqdm.autonotebook import tqdm

import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt

import pandas as pd

import time

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import *
from IPython.display import set_matplotlib_formats
# Render inline figures as PNG with a PDF fallback (crisper exported notebooks).
# NOTE(review): IPython.display.set_matplotlib_formats is deprecated in newer
# IPython releases in favor of matplotlib_inline.backend_inline — confirm the
# IPython version this runs under.
set_matplotlib_formats('png', 'pdf')

def set_seed(seed):
    """Seed every relevant RNG so experiments are reproducible.

    Seeds PyTorch (CPU and, per the torch docs, all CUDA devices via
    ``torch.manual_seed``), NumPy's legacy global generator, and Python's
    own ``random`` module — the original version left ``random`` unseeded.

    Args:
        seed: integer seed applied to all three libraries' global generators.
    """
    import random  # stdlib; local import keeps the file's top-level imports unchanged

    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

# Force cuDNN to select deterministic kernels (reproducible results, possibly
# slower convolutions). No effect on CPU-only runs.
torch.backends.cudnn.deterministic=True
# Fix the global seeds once at import time so the script below is repeatable.
set_seed(42)

# NOTE(review): this import and call repeat the identical setup earlier in the
# file (a copy-paste artifact); one of the two copies can safely be deleted.
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')

def set_seed(seed):
    """Make runs repeatable by seeding the torch and numpy global RNGs.

    NOTE(review): this redefines the identical function declared earlier in
    the file (copy-paste artifact); one copy can be removed.
    """
    for seed_fn in (torch.manual_seed, np.random.seed):
        seed_fn(seed)

# NOTE(review): duplicated setup — cuDNN determinism and the seed were already
# configured identically earlier in the file; one copy can be removed.
torch.backends.cudnn.deterministic=True
set_seed(42)


def train_simple_network(model, loss_func, training_loader, epochs=20, device="cpu", lr=0.001):
    """Train ``model`` with plain SGD over the batches of ``training_loader``.

    Args:
        model: ``nn.Module`` to optimize; moved to ``device`` in place.
        loss_func: callable ``loss_func(y_hat, labels)`` returning a scalar loss tensor.
        training_loader: iterable yielding ``(inputs, labels)`` batches.
        epochs: number of full passes over ``training_loader``.
        device: compute device string, e.g. ``"cpu"`` or ``"cuda"``.
        lr: SGD learning rate (previously hard-coded to 0.001; same default).

    Returns:
        List of average per-batch training losses, one entry per epoch.
        (The original accumulated ``running_loss`` but never reported it.)
    """
    # Move the model to the compute device BEFORE building the optimizer so the
    # optimizer is constructed over the final parameter tensors.
    model.to(device)
    # SGD is stochastic gradient descent over the parameters $\Theta$.
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    epoch_losses = []
    # Outer/inner loops iterate through all the data (batches) multiple times (epochs).
    for epoch in tqdm(range(epochs), desc="Epoch"):
        model = model.train()  # enable training-mode behavior (dropout, batch-norm updates)
        running_loss = 0.0
        num_batches = 0

        for inputs, labels in tqdm(training_loader, desc="Batch", leave=False):
            # moveTo recursively transfers (possibly nested) batch data onto `device`.
            # NOTE(review): moveTo is defined elsewhere in this project.
            inputs = moveTo(inputs, device)
            labels = moveTo(labels, device)

            # PyTorch accumulates gradients in place, so clear the stale values
            # from the previous iteration before the backward pass.
            optimizer.zero_grad()

            y_hat = model(inputs)  # forward pass: computes $f_\theta(\boldsymbol{x_i})$
            loss = loss_func(y_hat, labels)

            loss.backward()  # $\nabla_\Theta$ computed by this one call
            optimizer.step()  # $\Theta_{k+1} = \Theta_k - \eta \cdot \nabla_\Theta \ell(\hat{y}, y)$

            running_loss += loss.item()
            num_batches += 1

        # Report the epoch's mean loss instead of silently discarding it;
        # max(..., 1) guards against an empty loader.
        epoch_losses.append(running_loss / max(num_batches, 1))
    return epoch_losses


# Caption: This code defines a simple training loop, which can be used to learn the parameters $\Theta$ to almost any neural network $f_\Theta(\cdot)$ we will use in this book.

# Create a 1-dimensional input: 200 evenly spaced points on [0, 20].
X = np.linspace(0, 20, num=200)
# Create a noisy target: linear trend + sinusoid + standard Gaussian noise.
y = X + np.sin(X)*2 + np.random.normal(size=X.shape)
sns.scatterplot(x=X, y=y)
plt.show()