import torch

from tqdm import tqdm
import tensorboardX

from random import sample
from cartpole_dynamic import CartPole
from model import nnModel
import math
from argparse import ArgumentParser
from torch.optim.lr_scheduler import CosineAnnealingLR
import numpy as np

# Command-line configuration for the cart-pole controller training run.
# Each entry is (flag, type, default); defaults match the original script.
_parser = ArgumentParser()
for _flag, _type, _default in (
    ("--lr", float, 1e-3),
    ("--epochs", int, 25000),
    ("--batch_size", int, 64),
    ("--output_dir", str, "output"),
    ("--dt", float, 0.033),
    ("--polelen", float, 1.5),
    ("--m_cart", float, 20),
    ("--m_pole", float, 10),
    ("--g", float, 9.81),
    ("--timesteps", int, 100),
):
    _parser.add_argument(_flag, type=_type, default=_default)
args = _parser.parse_args()


def is_save_iter(epoch):
    """Return True when *epoch* (0-based) is a checkpoint epoch — every 5000th one."""
    return epoch % 5000 == 4999


# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# TensorBoard event writer; event files land under args.output_dir.
# NOTE(review): "writter" is a typo, but the training loop below references
# this exact name, so it is kept as-is here.
writter = tensorboardX.SummaryWriter(args.output_dir)


# Physical and training hyperparameters, unpacked from the CLI for brevity.
dt = args.dt  # Delta t (integration step, seconds)
polelen = args.polelen  # Length of pole
m_cart = args.m_cart  # Mass of cart
m_pole = args.m_pole  # Mass of pole
g = args.g  # Acceleration due to gravity
N = args.timesteps  # Number of time steps per rollout
batch_size = args.batch_size
# Time stamps 0, dt, 2*dt, ... for one rollout.
# NOTE(review): `time` is not referenced anywhere below — possibly leftover.
time = torch.arange(0, N, device=device) * dt
# Differentiable cart-pole dynamics (project-local module).
model = CartPole(dt, polelen, m_cart, m_pole, g).to(device)
# Controller network mapping the 4-dim state to a 1-dim force output.
# NOTE(review): 4-in/1-out inferred from nnModel(4, 1) — confirm in model.py.
control_nn = nnModel(4, 1).to(device)
lr = args.lr
epochs = args.epochs
pbar = tqdm(range(epochs))  # progress bar doubles as the epoch iterator
optimizer = torch.optim.Adam(control_nn.parameters(), lr=lr)
# Cosine-anneal the learning rate from lr down to 1% of lr over all epochs.
sched = CosineAnnealingLR(optimizer, epochs, lr * 0.01)

# --- Training loop: roll the dynamics forward under the learned controller and
# --- backpropagate through the whole trajectory (differentiable simulation).
for epoch in pbar:
    loss = 0
    # Fresh batch of initial states.
    # NOTE(review): column layout assumed to be
    # [cart position, cart velocity, pole angle, pole angular velocity],
    # inferred from how columns 0/1/2 are used below — confirm against CartPole.
    state = torch.zeros((batch_size, 4), device=device)
    # Curriculum: the initial-angle spread ramps linearly from 0 to 2*pi over
    # the first half of training, then stays at 2*pi (hardest case).
    curriculm_rate = 2 * epoch / epochs
    curriculm_rate = min(1.0, curriculm_rate)
    angle_err = 2 * math.pi * curriculm_rate
    # Initial pole angle: uniform in (pi - angle_err/2, pi + angle_err/2],
    # i.e. centered on the upright target angle pi.
    state[:, 2] = (
        math.pi * torch.ones_like(state[:, 2])
        - torch.rand_like(state[:, 2]) * angle_err
        + angle_err / 2
    )
    # Initial cart position uniform in [-0.5, 0.5); velocity in [-0.1, 0.1).
    state[:, 0] = torch.rand_like(state[:, 0]) * 1.0 - 0.5
    state[:, 1] = torch.rand_like(state[:, 1]) * 0.2 - 0.1
    # Recurrent hidden state of the controller; None resets it each episode.
    hx = None
    # Per-step loss weight: ramps linearly from 1/(2N) up to 1.0 so that later
    # timesteps (where the pole should already be balanced) dominate the loss.
    lambda_ = 1.0 / N / 2
    for i in range(N - 1):
        # Controller emits a force for each batch element plus its next hidden state.
        force, hx = control_nn(state, hx)
        # Advance the dynamics one step; the second return value is unused here.
        state, _ = model(state, force)
        # Wrap the angle into (-2*pi, 2*pi); torch.fmod keeps the sign of its input.
        state[:, 2] = torch.fmod(state[:, 2], 2 * math.pi)
        # Penalize deviation of the pole angle from upright (pi)...
        angle_loss = torch.nn.functional.mse_loss(
            state[:, 2], math.pi * torch.ones_like(state[:, 2])
        )
        # ...and of the cart position from the origin.
        pos_loss = torch.nn.functional.mse_loss(
            state[:, 0], torch.zeros_like(state[:, 0])
        )
        force_loss = force**2
        # Force penalty is currently disabled (weight 0.00); kept for easy tuning.
        loss = loss + angle_loss * lambda_ + pos_loss * 0.1 + 0.00 * force_loss
        lambda_ = min(1.0, lambda_ + 1.0 / N / 2)
    control_nn.zero_grad()
    # mse_loss already reduces to a scalar, but the 0-weighted force_loss term
    # is per-sample, so .mean() collapses any remaining batch dimension.
    loss = loss.mean()
    loss.backward()
    optimizer.step()
    sched.step()
    writter.add_scalar("loss", loss.item(), epoch)
    pbar.set_description_str(f"Loss: {loss.item():.2f}")
    # Periodic checkpoint every 5000 epochs (filenames use the 1-based epoch).
    if is_save_iter(epoch):
        torch.save(
            control_nn.state_dict(), f"{args.output_dir}/control_nn_{epoch+1}.pth"
        )
# Final controller checkpoint after training completes.
nn_path = f"{args.output_dir}/control_nn.pth"
torch.save(control_nn.state_dict(), nn_path)
print(f"Model saved at {nn_path}")
