""" main step of the SED training
Includes:
Data modules
Optimizer setups
Train/Val/Test steps
Pytorch-lightning trainer
"""

import random
import torch
import warnings

import pytorch_lightning as pl
import numpy as np

from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.loggers import TensorBoardLogger
from training.data_augms import BYOLATransform
from nets import CRNN, SelfModel
from opt import opt_setup, model_init
from datasets import get_dataloaders
from utils import sed_arg_parser, reconfig, backup_configs
from training.sssl_trainer import SSSLTrainer


# A complete SED system train/test pipeline
warnings.filterwarnings("ignore")

# Parse known CLI args, then fold them (plus any unknown overrides) into the
# final run-time configuration dictionaries.
args, unknown_args = sed_arg_parser().parse_known_args()
args, data_paths, feat_configs, opt_configs = reconfig(args, unknown_args)

# Freeze every RNG for reproducibility. Use an explicit ``is not None`` check:
# the original truthiness test (`if seed:`) silently skipped seeding when the
# configured seed was 0, even though 0 is a perfectly valid seed.
seed = opt_configs["seed"]
if seed is not None:
    # pl.seed_everything already seeds random/numpy/torch, but the explicit
    # calls are kept for parity with the original setup.
    torch.random.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    pl.seed_everything(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Force deterministic cuDNN kernels (may reduce throughput).
    torch.backends.cudnn.deterministic = True

# Augmentation transform for the self-supervised branch (BYOL-A style crops).
byola_aug = BYOLATransform(
    (opt_configs["feat_crop_rate"], opt_configs["time_crop_rate"])
)

# Build the label encoder and the train/val/test dataloaders.
label_encoder, train_loader, val_loader, test_loader = get_dataloaders(
    opt_configs["batch_sizes"],
    data_paths,
    feat_configs,
    opt_configs["batch_size_val"],
    opt_configs["num_workers"],
    aug_transform=byola_aug,
    selfsl_usage=[0, 0, 0, 1, 1],
)

# Build the student CRNN and optionally warm-start it from a pretrained
# checkpoint (prefix remapping handled by model_init).
sed_student = CRNN(**opt_configs["net"])
sed_student = model_init(
    sed_student,
    opt_configs["pretrained_ckpt"],
    opt_configs["pretrained_prefix"],
    opt_configs["desirable_prefix"],
)

# Wrap the student in the self-supervised model; input and projection
# dimensions are both taken from feat_dim.
feat_dim = opt_configs["feat_dim"]
encoder_st = SelfModel(
    sed_student,
    feat_dim,
    feat_dim,
    self_target=opt_configs["self_target"],
)

# Optimizer + LR scheduler; opt_setup needs steps-per-epoch for scheduling.
opt_n_schd = opt_setup(opt_configs, len(train_loader), encoder_st)

# Lightning module wiring model, optimizer/scheduler and dataloaders together.
trainer_args = (
    data_paths,
    opt_configs,
    label_encoder,
    encoder_st,
    opt_n_schd,
    train_loader,
    val_loader,
    test_loader,
)
sed_module = SSSLTrainer(*trainer_args)

# TensorBoard logger; its resolved log_dir also hosts the config backups and
# the model checkpoints.
logger = TensorBoardLogger(args.log_dir, args.exp_name)
backup_configs(logger.log_dir, data_paths, feat_configs, opt_configs)

# Early stopping and checkpoint selection both track "obj_metric"
# (higher is better); the LR monitor logs the learning rate every step.
early_stop_cb = EarlyStopping(
    monitor="obj_metric",
    mode="max",
    patience=opt_configs["early_stop_patience"],
    verbose=True,
)
checkpoint_cb = ModelCheckpoint(
    logger.log_dir,
    monitor="obj_metric",
    mode="max",
    save_top_k=2,
    save_last=True,
    filename='{epoch}-{obj_metric:.3f}',
)
lr_monitor_cb = LearningRateMonitor(logging_interval='step')

callbacks = [early_stop_cb, checkpoint_cb, lr_monitor_cb]

# PyTorch Lightning trainer; deterministic=True pairs with the cuDNN /
# seeding setup above.
trainer = pl.Trainer(
    max_epochs=opt_configs["n_epochs"],
    callbacks=callbacks,
    accelerator="gpu",
    devices=args.gpus,
    strategy=opt_configs["backend"],
    precision=opt_configs["precision"],
    accumulate_grad_batches=opt_configs["accumulate_batches"],
    gradient_clip_val=opt_configs["gradient_clip"],
    logger=logger,
    default_root_dir=logger.log_dir,
    check_val_every_n_epoch=opt_configs["validation_interval"],
    num_sanity_val_steps=0,
    log_every_n_steps=opt_configs["log_every_n_steps"],
    limit_train_batches=opt_configs["limit_train_batches"],
    limit_val_batches=opt_configs["limit_val_batches"],
    limit_test_batches=opt_configs["limit_test_batches"],
    deterministic=True,
)

# Start training/inference: either evaluate an explicitly given checkpoint,
# or train first and then evaluate the best checkpoint picked by the
# ModelCheckpoint callback.
if args.test_from_checkpoint is not None:
    ckpt_path = args.test_from_checkpoint
else:
    trainer.fit(sed_module, ckpt_path=args.resume_from_checkpoint)
    ckpt_path = trainer.checkpoint_callback.best_model_path
    print(f"best model: {ckpt_path}")

# map_location="cpu" makes the load device-agnostic (a GPU-saved checkpoint
# would otherwise fail to load on a machine without that device); Lightning
# moves the weights to the correct device when testing starts.
test_state_dict = torch.load(ckpt_path, map_location="cpu")["state_dict"]
sed_module.load_state_dict(test_state_dict)
trainer.test(sed_module)