import time
import torch
import torchvision
import pytorch_lightning as pl

from torch.utils.data import DataLoader
from pytorch_lightning import LightningModule

from model import LyPoseModel
from utils.utils import MetricLogger
from utils.coco_utils import get_coco_api_from_dataset
from utils.coco_eval import CocoEvaluator


class LyPoseDetector(LightningModule):
    """Lightning wrapper around ``LyPoseModel`` for keypoint detection.

    ``training_step`` sums the torchvision-style loss dict returned by the
    model; validation accumulates COCO keypoint AP through ``CocoEvaluator``
    and republishes it as the ``val_ap`` metric (monitored by checkpointing).
    """

    def __init__(
        self,
        train_batch_size=6,  # fits in ~10 GB of GPU RAM
        eval_per_epoch=10,
        lr=1e-3,
        momentum=0.999,
        weight_decay=1e-3,
        lr_steps=[8, 11],  # read-only; only used by the (disabled) MultiStepLR schedule
        lr_gamma=0.1,
        val_loader=None
    ) -> None:
        super().__init__()
        # `ignore` keeps the (non-serializable) DataLoader out of self.hparams.
        self.save_hyperparameters(ignore=['val_loader'])
        self.model = LyPoseModel()
        self.metric_logger = MetricLogger(delimiter="  ")
        # Pre-tuned learning rate (presumably from Trainer(auto_lr_find=True)).
        # NOTE(review): configure_optimizers builds the optimizer from
        # self.hparams.lr and uses this value only for eta_min — confirm intended.
        self.lr = 1.5848931924611133e-05

        if val_loader:
            self.val_loader = val_loader
            self.eval_type = ["keypoints"]
            self.coco = get_coco_api_from_dataset(self.val_loader.dataset)
            self.coco_evaluator = CocoEvaluator(self.coco, self.eval_type)
            self.val_ap = 0  # most recent COCO keypoint AP; logged every train step

    def forward(self, img, targets=None):
        """Return the loss dict when targets are given (training), else predictions."""
        if targets:
            return self.model(img, targets)
        return self.model(img)

    def training_step(self, batch, batch_idx):
        images, targets = batch
        loss_dict = self(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        self.log('train loss', losses)
        # Re-log the latest validation AP so it is visible on every step.
        self.log('val_ap', self.val_ap, prog_bar=True)
        return losses

    def validation_step(self, batch, batch_idx):
        if batch:
            images, targets = batch
            model_time = time.time()
            # BUG FIX: was `model(images)` — `model` is undefined in this scope
            # and raised NameError; route through self.forward instead.
            outputs = self(images)
            model_time = time.time() - model_time
            evaluator_time = time.time()
            res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
            self.coco_evaluator.update(res)
            evaluator_time = time.time() - evaluator_time
            self.metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    def validation_epoch_end(self, val_step_outputs):
        # Skip the sanity-check / very first pass (current_epoch == 0).
        if self.current_epoch > 0:
            self.metric_logger.synchronize_between_processes()
            print("Averaged stats:", self.metric_logger)
            self.coco_evaluator.synchronize_between_processes()
            self.coco_evaluator.accumulate()
            self.val_ap = self.coco_evaluator.summarize()
            # Fresh evaluator so next epoch's stats do not mix with this one's.
            self.coco_evaluator = CocoEvaluator(self.coco, self.eval_type)

    def configure_optimizers(self):
        """SGD (from self.hparams) with a cosine-annealing LR schedule."""
        params = self.model.parameters()
        optimizer = torch.optim.SGD(
            params, lr=self.hparams.lr, momentum=self.hparams.momentum,
            weight_decay=self.hparams.weight_decay)
        # Alternative step schedule, currently disabled:
        # lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        #     optimizer, milestones=self.hparams.lr_steps, gamma=self.hparams.lr_gamma)
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=40, eta_min=self.lr * 2)
        return [optimizer], [lr_scheduler]



if __name__ == "__main__":
    from pytorch_lightning import seed_everything
    from dataset import get_coco_dataset, get_dataloader
    from pytorch_lightning.callbacks import ModelCheckpoint

    seed_everything(0)
    ################################# hyper parameters ##########################################
    save_top_k_model = 1
    batchsize = 6  # for 10 GB GPU RAM
    accumulate_grad_batches = 4  # effective batch size = batchsize * accumulate_grad_batches
    ################################# end of hyper parameters ###################################

    ckpt_save_dir = '/project/train/models/'
    train_dataset = get_coco_dataset(
        img_folder='/home/data/1262/',
        annfile='/project/train/src_repo/ly_pose/person_keypoints_train.json',
        )
    test_dataset = get_coco_dataset(
        img_folder='/home/data/1262/',
        annfile='/project/train/src_repo/ly_pose/person_keypoints_test.json',
    )

    # BUG FIX: the hyper-parameter variables above were declared but the calls
    # below re-hard-coded 6 and 4, so editing the hyper-parameter section had
    # no effect. Use the variables as the single source of truth.
    train_loader = get_dataloader(train_dataset, batch_size=batchsize, image_set="train")
    test_loader = get_dataloader(test_dataset, image_set="test")

    model = LyPoseDetector(val_loader=test_loader)

    # Keep only the best checkpoint(s) by validation keypoint AP.
    ckpt_callback = ModelCheckpoint(
        save_top_k=save_top_k_model,
        dirpath=ckpt_save_dir,
        monitor="val_ap",
        mode='max',
        filename="lypose-{epoch:03d}-{val_ap:.2f}"
    )

    my_callbacks = [ckpt_callback]
    trainer = pl.Trainer(
        accelerator="gpu", devices=1,  # strategy="ddp",
        resume_from_checkpoint=None,
        auto_lr_find=True,
        enable_progress_bar=False,
        max_epochs=4000,
        max_steps=-1,
        precision=16,
        callbacks=my_callbacks,
        accumulate_grad_batches=accumulate_grad_batches,
        check_val_every_n_epoch=20,
    )
    # trainer.tune(model, train_dataloaders=train_loader, val_dataloaders=test_loader)  # finds an lr and writes it to model.lr
    trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=test_loader)