from pathlib import Path
import torch
import yaml
import math
import os
import time
from mynn.datasets import build_dataset, build_dataloader
from mynn.models import build_model
from mynn.losses import L1Loss
from mynn.utils import save_checkpoint, load_checkpint, get_root_logger
from mynn.utils.misc import get_time_str

# Load the experiment configuration from YAML.
YAML_PATH = 'options/SpyNet4TOF/spynet.yaml'
with open(YAML_PATH, 'r', encoding='utf-8') as f:
    opt = yaml.safe_load(f)

# Create the experiment directory and attach a file logger to it.
log_dir = Path('experiments') / opt['exp_name']
log_dir.mkdir(parents=True, exist_ok=True)
log_file = log_dir / str(opt['train']['log_file'])
logger = get_root_logger(log_file=log_file)

# TODO: Add support for single-node multi-GPU data parallel training.
# Select the compute device from the config.
if opt['train']['cuda']:
    device = "cuda"
else:
    device = "cpu"

# Restrict visible GPUs to the configured list.
# NOTE(review): this env var is set after `import torch`; it still takes
# effect as long as no CUDA context exists yet (the first CUDA call is
# `model.to(device)` later) — confirm nothing touches torch.cuda earlier.
gpu_list = ",".join(str(gpu_id) for gpu_id in opt['train']['gpu_list'])
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
logger.info(f'gpu_list:[{gpu_list}]')

# Build each dataset together with its dataloader.
train_dataset = build_dataset(dataset_opt=opt['train']['dataset'], phase='train')
train_dataloader = build_dataloader(dataset=train_dataset, opt=opt, phase='train')

val_dataset = build_dataset(dataset_opt=opt['train']['dataset'], phase='val')
val_dataloader = build_dataloader(dataset=val_dataset, opt=opt, phase='val')

# Derive how many epochs are needed to cover the configured iteration budget.
num_iter_per_epoch = math.ceil(len(train_dataset) / opt['train']['batch_size'])
total_iters = int(opt['train']['total_iter'])
total_epochs = math.ceil(total_iters / num_iter_per_epoch)

# Instantiate the network and move it to the selected device.
model = build_model(opt)
model.to(device)

# L1 objective between predicted and ground-truth flow.
loss_fn = L1Loss()
logger.info(f'Loss: {type(loss_fn).__name__}')

# Adam optimizer configured from the training options.
optimizer = torch.optim.Adam(
    model.parameters(),
    lr=opt['train']['lr'],
    weight_decay=opt['train']['weight_decay'],
)
logger.info(f'Optimizer: {type(optimizer).__name__}')

current_iter = 0
current_epoch = 0

# Optionally resume model weights and the iteration counter.
# NOTE: `load_checkpint` (sic) is the name exported by mynn.utils.
if opt['train']['resume']:
    model, current_iter = load_checkpint(opt=opt, model=model)

# Main training loop: runs until `total_iters` optimizer steps have been taken,
# logging every `dis_freq` steps, validating every `val_freq` steps, and
# checkpointing every `save_freq` steps (and once more at the end).
best_loss = math.inf
losses = 0
logger.info('Training start.')
start_time = time.time()
for current_epoch in range(total_epochs):
    for data in train_dataloader:
        model.train()

        ref = data['ref'].to(device)
        supp = data['supp'].to(device)
        flow = data['flow'].to(device)

        # Forward propagation.
        flow_hat = model(ref, supp)

        # Compute loss.
        loss = loss_fn(flow, flow_hat)
        losses += loss.item()

        # Update parameters.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Count.
        current_iter += 1

        # Print information.
        if current_iter % opt['train']['dis_freq'] == 0:
            # `used_time` is the wall time spent on the last dis_freq iters;
            # the ETA scales it by the number of dis_freq windows remaining.
            end_time = time.time()
            used_time = end_time - start_time  # second
            remaining_time = (total_iters - current_iter) / opt['train']['dis_freq'] * used_time / 3600  # hour
            logger.info(
                f"Epoch:{current_epoch} Step:{current_iter} Average loss:{losses/opt['train']['dis_freq']:.6f} Time:{(used_time):.1f} s Remaining Time:{remaining_time:.1f} h"
            )
            # Reset the running-loss window and the timer.
            losses = 0
            start_time = time.time()

        # Validate.
        if current_iter % opt['train']['val_freq'] == 0:
            with torch.no_grad():
                model.eval()
                val_losses = 0
                num_val_batches = 0
                for val_data in val_dataloader:
                    ref = val_data['ref'].to(device)
                    supp = val_data['supp'].to(device)
                    flow = val_data['flow'].to(device)
                    # Forward propagation.
                    flow_hat = model(ref, supp)
                    # Compute loss.
                    val_loss = loss_fn(flow, flow_hat)
                    # BUGFIX: previously accumulated `loss.item()` (the last
                    # *training* loss), making the validation average wrong
                    # and best-checkpoint selection meaningless.
                    val_losses += val_loss.item()
                    num_val_batches += 1
                # Guard against an empty validation loader (the old code
                # raised NameError on `step` / divided by zero).
                if num_val_batches > 0:
                    avg_val_loss = val_losses / num_val_batches
                    logger.info('-----Validation Result-----')
                    logger.info(f'Average loss: {avg_val_loss:.6f}')
                    if avg_val_loss < best_loss:
                        best_loss = avg_val_loss
                        save_checkpoint(opt=opt, model=model, current_iter=current_iter)

        # Save checkpoint.
        if current_iter % opt['train']['save_freq'] == 0 or current_iter >= total_iters:
            save_checkpoint(opt=opt, current_iter=current_iter, model=model)

        # End of budget: leave the epoch loop.
        if current_iter >= total_iters:
            break
    # BUGFIX: the old single `break` only exited the inner loop, so every
    # remaining epoch ran one extra iteration, re-logged "Training complete."
    # and re-saved a checkpoint. Exit the outer loop as well.
    if current_iter >= total_iters:
        logger.info('Training complete.')
        break