#####################################################################################
import torch
import torch.nn.functional as F
import parameters as p
import os
from tqdm import tqdm
import cv2
from utils import *
from model import Unet
from dataset import get_dataloader
import tensorboardX

import warnings
warnings.filterwarnings("ignore", category=UserWarning)


def train(denoise_model: torch.nn.Module,
          optimizer, dataloader, scheduler: torch.optim.lr_scheduler._LRScheduler, device='cuda',
          epochs: int = p.EPOCHS, save_on_epoch: int = p.SAVE_ON_EPOCH,
          sample_on_epoch: int = p.SAMPLE_ON_EPOCH):
    """Train the denoising diffusion model.

    Loads model/optimizer checkpoints when present (otherwise Xavier-initializes
    the weight matrices), then runs `epochs` passes over `dataloader`, stepping
    `scheduler` once per epoch. Periodically saves checkpoints and displays a
    grid of sampled images in an OpenCV window.

    Args:
        denoise_model: noise-prediction network; called as denoise_model(x_noise, t).
        optimizer: optimizer over denoise_model.parameters().
        dataloader: yields (image_batch, label) pairs; labels are ignored.
        scheduler: LR scheduler stepped once per epoch.
        device: device the batches and noise tensors are placed on.
        epochs: number of training epochs.
        save_on_epoch: checkpoint every N epochs (skipping epoch 0).
        sample_on_epoch: sample/display every N epochs (skipping epoch 0).
    """
    # Load a model checkpoint if one exists; otherwise initialize weights.
    if os.path.exists(p.MODEL_SAVE_PATH):
        print("####################### Model checkpoint detected #######################")
        # map_location so a checkpoint saved on GPU still loads on a CPU-only host
        model_dict = torch.load(p.MODEL_SAVE_PATH, map_location=device)
        denoise_model.load_state_dict(model_dict)
        print('####################### Model checkpoint loaded #######################')
    else:
        print("initialize model")
        for w in denoise_model.parameters():
            # Xavier init requires >= 2 dims; skip biases / 1-D parameters.
            if w.dim() > 1:
                torch.nn.init.xavier_uniform_(w)

    if os.path.exists(p.OPTIMIZER_SAVE_PATH):
        print(
            "####################### Optimizer checkpoint detected #######################")
        optim_dict = torch.load(p.OPTIMIZER_SAVE_PATH)
        optimizer.load_state_dict(optim_dict)
        print("####################### Optimizer checkpoint loaded #######################")

    cv2.namedWindow("Samples", cv2.WINDOW_AUTOSIZE)

    # TRAINING LOOP
    for epoch in range(epochs):

        print(f"========== epoch : {epoch} of {p.EPOCHS} ==========")

        loss_accumulator = 0.
        with tqdm(dataloader, dynamic_ncols=True) as tqdmDataloader:
            for x_o, _ in tqdmDataloader:

                x_o = x_o.to(device)
                # FIX: size t by the actual batch — the last batch of an epoch may
                # be smaller than p.BATCH_SIZE, which previously caused a mismatch.
                t = torch.randint(0, p.T, (x_o.shape[0],), device=device)

                noise = torch.randn_like(x_o)
                # NOTE(review): `noise` is never passed to inject_noise, so unless
                # inject_noise is seeded to reproduce it, the MSE target below is
                # uncorrelated with the noise actually injected into x_noise.
                # Confirm inject_noise's contract (utils.py) — likely it should
                # accept `noise` or return the noise it used.
                x_noise = inject_noise(x_o, t, device=device)

                predicted_noise = denoise_model(x_noise, t)
                loss = F.mse_loss(predicted_noise, noise, reduction="mean")

                # FIX: gradients must be cleared every step; this call was
                # commented out, so gradients accumulated across all batches.
                optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(denoise_model.parameters(), p.grad_clip)
                optimizer.step()

                loss_accumulator += loss.item()

                tqdmDataloader.set_postfix({
                    "epoch: ": epoch,
                    "loss: ": loss.item(),
                    "image shape: ": x_o.shape,
                    "LR": optimizer.state_dict()['param_groups'][0]['lr']
                })
        # Stepped once per epoch (so warm-up/annealing milestones count epochs).
        scheduler.step()

        # SAVE — checkpoint model and optimizer every `save_on_epoch` epochs.
        if epoch != 0 and epoch % save_on_epoch == 0:
            if not os.path.exists(p._CHECKPOINT_SAVE_PATH):
                os.mkdir(p._CHECKPOINT_SAVE_PATH)
            torch.save(optimizer.state_dict(), p.OPTIMIZER_SAVE_PATH)
            torch.save(denoise_model.state_dict(), p.MODEL_SAVE_PATH)
            print("####################### Checkpoint saved #######################")

        # SAMPLE — generate and display a grid of images every `sample_on_epoch` epochs.
        if epoch != 0 and epoch % sample_on_epoch == 0:
            # assumes sample() returns values in [0, 1] — TODO confirm (utils.py)
            x_sample = sample(denoise_model, device=device).cpu()[:32] * 255
            # FIX: uint8, not int8 — int8 wraps pixel values 128..255 to negatives
            # and corrupts the cv2 display; clamp guards against slight overshoot.
            x_sample = x_sample.clamp(0, 255).to(dtype=torch.uint8)
            # NOTE(review): einops is not imported at the top of this file; it
            # presumably arrives via `from utils import *` — verify.
            # The pattern swaps h/w axes in the grid (transposes each image for
            # non-square inputs) — confirm '(b1 h) (b2 w)' wasn't intended.
            x_sample = einops.rearrange(
                x_sample, '(b1 b2) c h w -> (b1 w) (b2 h) c', b1=2,)
            cv2.imshow("Samples", x_sample.numpy())
            cv2.waitKey(10)


if __name__ == "__main__":

    # Select GPU when available; everything below must honor this choice.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # FIX: move the model to `device`, not a hardcoded 'cuda' — the original
    # crashed on CPU-only hosts despite computing `device` one line above.
    denoise_model = Unet(isGrayImage=True).to(device)
    optimizer = torch.optim.AdamW(denoise_model.parameters(), lr=p.LEARNING_RATE, weight_decay=p.weight_decay)

    train_dataloader = get_dataloader(persistent_workers=True, pin_memory=True)

    # Linear warm-up followed by cosine annealing.
    # NOTE(review): train() calls scheduler.step() once per EPOCH, so
    # p.warm_up_steps and the cosine T_max=5 are counted in epochs, not
    # batches — confirm that matches the intent behind the names.
    warmUpScheduler = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=1e-5, end_factor=1, total_iters=p.warm_up_steps)
    cosineScheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, 5, eta_min=0, last_epoch=-1)

    scheduler = torch.optim.lr_scheduler.SequentialLR(optimizer, schedulers=[warmUpScheduler, cosineScheduler], milestones=[p.warm_up_steps], last_epoch=-1, verbose=False)
    train(denoise_model, device=device, scheduler=scheduler,
          dataloader=train_dataloader, optimizer=optimizer,)
