import sys
import os
import argparse
from pathlib import Path
import numpy as np
from ais.core import *
import torch
from torch.utils.data import DataLoader, Dataset
from torch import optim
import torch.nn.functional as F
from torchvision import transforms

from ignite.engine import create_supervised_trainer, create_supervised_evaluator, Events
import ignite.contrib.handlers.tensorboard_logger as ignite_tb_logger
from ignite.contrib.handlers import ProgressBar
import ignite.metrics as ignite_metrics
import ignite.utils as ignite_utils
from ignite.handlers import ModelCheckpoint, Checkpoint

from ais.data import *
from ais.model import SegHighResolutionNet
from ais.loss import BCEDiceLoss
from ais.metrics import AISLandmarkMetric
from ais.trainer import SupervisedTrainer
from ais.handler import StatsHandler
from ais.utils import cfg, cfg_from_file, set_random_seed, get_state_dict, setup_logger, print_torch_version
from ais.image import cv_show, cv_keyboard_run, cv_resize_shape, convert_mask_to_image
from ais import __version__ as ais_version


class SpineSegTrainer(SupervisedTrainer):
    """Supervised trainer for spine semantic segmentation.

    Wraps dataset preparation, ignite trainer/evaluator wiring, tensorboard
    logging, checkpoint resume and a manual inference loop around a
    segmentation network (e.g. SegHighResolutionNet).
    """

    # Device string handed to distributed metric reduction (see evaluator_metrics).
    gpu_device = 'cuda'

    def __init__(self, model,
                 criterion=None,
                 optimizer=None,
                 lr_scheduler=None,
                 gpus=1,
                 distributed=False,
                 evaluators=None,
                 **kwargs):
        """
        Args:
            model: segmentation network to train.
            criterion: loss module, e.g. BCEDiceLoss.
            optimizer: optimizer (forwarded to SupervisedTrainer).
            lr_scheduler: LR scheduler (forwarded to SupervisedTrainer).
            gpus (int): number of GPUs to use; 0 means CPU.
            distributed (bool): distributed-training flag.
            evaluators (list): subset of {'train', 'validation'} selecting
                which evaluators are attached; defaults to ['validation'].
            **kwargs: extra keyword options for SupervisedTrainer
                (optimizer__param, scheduler__param, warmup__param, ...).
        """
        super(SpineSegTrainer, self).__init__(model, criterion=criterion,
                                              optimizer=optimizer,
                                              lr_scheduler=lr_scheduler,
                                              gpus=gpus,
                                              distributed=distributed,
                                              **kwargs)
        # None-sentinel instead of a mutable default argument.
        self.evaluators = ['validation'] if evaluators is None else evaluators
        self.tb_logger = None

    def prepare_data(self):
        """Build the train/val/test datasets from cfg.DATASET settings."""
        data_root = cfg.DATASET.ROOT
        data_type = cfg.DATASET.DATA_TYPE
        stats = cfg.MODEL.STATS

        data_path = Path(data_root)
        image_items = sorted(data_path.joinpath('data', 'train').glob('*.jpg'))
        x_train, x_val = train_test_split(image_items, test_size=0.2, shuffle=True)

        # NOTE(review): eval() on a config string executes arbitrary code if the
        # config file is untrusted; a dataset-class registry would be safer.
        dataset_cls = eval(data_type)
        self.train_dataset = dataset_cls(data_path, 'train',
                                         x_train,
                                         cfg.MODEL.SEMANTIC_SEG.SHAPE,
                                         init_train_seg_albu,
                                         stats,
                                         kp_num=68)
        # The validation split is carved out of the same 'train' folder.
        self.val_dataset = dataset_cls(data_path,
                                       'train',
                                       x_val,
                                       cfg.MODEL.SEMANTIC_SEG.SHAPE,
                                       init_test_seg_albu,
                                       stats,
                                       kp_num=68)

        image_items = sorted(data_path.joinpath('data', 'test').glob('*.jpg'))
        self.test_dataset = dataset_cls(data_path,
                                        'test',
                                        image_items,
                                        cfg.MODEL.SEMANTIC_SEG.SHAPE,
                                        init_test_seg_albu,
                                        stats,
                                        kp_num=68)

    def train_dataloader(self):
        """Shuffled DataLoader over the training dataset."""
        bs = cfg.DATA_LOADER.BS
        return DataLoader(self.train_dataset, batch_size=bs, shuffle=True,
                          num_workers=cfg.DATA_LOADER.TRAIN_NUM_WORKERS)

    def val_dataloader(self):
        """Sequential DataLoader over the validation dataset."""
        bs = cfg.DATA_LOADER.BS
        return DataLoader(self.val_dataset, batch_size=bs, shuffle=False,
                          num_workers=cfg.DATA_LOADER.VALID_NUM_WORKERS)

    def test_dataloader(self):
        """Sequential DataLoader over the test dataset."""
        bs = cfg.DATA_LOADER.BS
        return DataLoader(self.test_dataset, batch_size=bs, shuffle=False,
                          num_workers=cfg.DATA_LOADER.TEST_NUM_WORKERS)

    def create_evaluator(self, metrics: dict, output_transform=lambda x, y, y_pred: (y_pred, y,)):
        """Create an ignite evaluator for self.model with the given metrics."""
        return create_supervised_evaluator(self.model, metrics=metrics, device=self.device,
                                           non_blocking=True, output_transform=output_transform)

    def evaluator_metrics(self):
        """Metrics attached to every evaluator: per-class IoU and the loss."""
        return {"IOU": ignite_metrics.IoU(ignite_metrics.ConfusionMatrix(num_classes=2)),
                "loss": ignite_metrics.Loss(self.criterion,
                                            device=self.gpu_device if self.distributed else None)}

    def pre_process(self, batch: Sequence[torch.Tensor], device: Optional[Union[str, torch.device]] = None, non_blocking: bool = False):
        """Move an (x, y) batch onto `device`."""
        x, y = batch
        return (
            ignite_utils.convert_tensor(x, device=device, non_blocking=non_blocking),
            ignite_utils.convert_tensor(y, device=device, non_blocking=non_blocking),
        )

    def post_process(self, batch: Sequence[torch.Tensor]):
        """Apply a channel-wise softmax to the raw logits after inference."""
        x, y, y_pred = batch
        assert y_pred.dim() == 4, 'Softmax2d requires a 4D tensor as input'
        # BUGFIX: dropped the private `_stacklevel` argument of F.softmax; it
        # only tunes warning reporting and is not part of the public API.
        y_pred = F.softmax(y_pred, dim=1)
        return x, y, y_pred

    def resume(self):
        """Resume model weights from cfg.MODEL.SEMANTIC_SEG.RESUME, if the file exists."""
        checkpoint_fp = Path(cfg.MODEL.SEMANTIC_SEG.RESUME)
        if checkpoint_fp.exists() and checkpoint_fp.is_file():
            print("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
            checkpoint = torch.load(checkpoint_fp.as_posix())
            # Unwrap DataParallel so keys match the checkpoint's 'module_dict'.
            to_load = {'module_dict': self.model.module if hasattr(self.model, 'module') else self.model}
            Checkpoint.load_objects(to_load=to_load, checkpoint=checkpoint)

    def _inference(self, batch: Sequence[torch.Tensor]):
        """Run one forward pass in eval mode without gradients."""
        self.model.eval()
        with torch.no_grad():
            x, y = self.pre_process(batch, device=self.device, non_blocking=False)
            y_pred = self.model(x)
            return x, y, y_pred

    def start_inference(self, checkpoint_file, dataset: Dataset):
        """Manually iterate `dataset`, run inference and visualize predictions.

        Loads weights from `checkpoint_file`, then shows the ground-truth mask
        (red) and the predicted mask (blue) side by side; press 'q' to quit.
        """
        self.device = "cpu"
        if self.gpus > 0:
            if torch.cuda.is_available():
                self.device = "cuda"
                self.model.to(self.device)

            # NOTE(review): DataParallel is applied even when CUDA is missing;
            # with gpus >= 2 on a CPU-only host this would fail -- confirm intent.
            if not self.distributed and self.gpus >= 2:
                self.model = torch.nn.DataParallel(self.model, device_ids=range(self.gpus))

        static_dict = get_state_dict(str(checkpoint_file), self.device)
        module_dict = static_dict['module_dict']
        if hasattr(self.model, 'module'):
            self.model.module.load_state_dict(module_dict)
        else:
            self.model.load_state_dict(module_dict)

        infer_dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1)
        for i, batch in enumerate(infer_dataloader):
            x, y_gt, y_pred = self.post_process(self._inference(batch))
            y_pred = torch.argmax(y_pred, dim=1)

            # Undo normalization and convert to an HWC uint8 image for display.
            ori_image = denormalize(x.squeeze(), cfg.MODEL.STATS[0], cfg.MODEL.STATS[1])
            ori_image *= 255
            ori_image = np.clip(ori_image.cpu().numpy(), a_min=0., a_max=255.)
            ori_image = np.transpose(ori_image, (1, 2, 0)).astype(np.uint8)
            # NOTE(review): resizing to the image's own shape looks like a
            # no-op -- confirm whether a target shape was intended here.
            ori_image = cv_resize_shape(ori_image, ori_image.shape[0:2])

            palette = [[0, 0, 0], [0, 0, 255]]    # rgb: prediction in blue
            show_pred_image, show_pred_mask = convert_mask_to_image(
                ori_image,
                y_pred.squeeze().cpu().numpy().astype(np.uint8),
                classes=2,
                palette=palette)

            palette = [[0, 0, 0], [255, 0, 0]]    # rgb: ground truth in red
            show_ori_image, show_ori_mask = convert_mask_to_image(
                ori_image,
                y_gt.squeeze().cpu().numpy().astype(np.uint8),
                classes=2,
                palette=palette)

            cv_show('infer', np.hstack((show_ori_image, show_pred_image)), 0.8)
            cv_keyboard_run('q')

    def build_trainer(self, max_eppchs: int):
        """Wire dataloaders, trainer, evaluators and loggers, then run training.

        Args:
            max_eppchs (int): maximum number of epochs to train.
                (Name kept for backward compatibility; "max_epochs" typo.)
        """
        # create dataloaders
        self.prepare_data()
        train_loader = self.train_dataloader()
        val_loader = self.val_dataloader()

        self.configure_optimizers(len(train_loader))

        # create trainer
        trainer = self.create_trainer_common(output_transform=lambda x, y, y_pred, loss: {'batch loss': loss.item()},
                                             train_loader=train_loader,
                                             checkpoint_every=cfg.EVERY.CHECKPOINT,
                                             output_path=cfg.OUTPUT_DIR,
                                             display_every=cfg.EVERY.DISPLAY,
                                             log_every=cfg.EVERY.LOG)

        def eval_output_transform(x, y_gt: TensorImage, y_pr):
            # Upscale predictions to the ground-truth resolution before metrics.
            h_pr, w_pr = y_pr.size(2), y_pr.size(3)
            h_gt, w_gt = y_gt.size(1), y_gt.size(2)
            if h_pr != h_gt or w_pr != w_gt:
                # F.upsample is deprecated; interpolate with align_corners=False
                # reproduces its bilinear behavior.
                y_pr = F.interpolate(y_pr, size=(h_gt, w_gt), mode='bilinear', align_corners=False)
            return y_pr, y_gt

        # create evaluator(s) and bind dataloaders
        evaluator_dict = {}
        metrics = self.evaluator_metrics()
        if 'validation' in self.evaluators and 'train' not in self.evaluators:
            validation_evaluator = self.create_evaluator(metrics=metrics, output_transform=eval_output_transform)

            def run_eval(engine):
                torch.cuda.synchronize()
                validation_evaluator.run(val_loader)

            trainer.add_event_handler(Events.EPOCH_STARTED(every=cfg.EVERY.VALIDATE), run_eval)
            trainer.add_event_handler(Events.COMPLETED, run_eval)

            evaluator_dict['validation'] = validation_evaluator
        elif 'train' in self.evaluators and 'validation' not in self.evaluators:
            train_evaluator = self.create_evaluator(metrics=metrics, output_transform=eval_output_transform)

            def run_eval(engine):
                torch.cuda.synchronize()
                # BUGFIX: the train evaluator must run on the train loader
                # (it previously evaluated val_loader).
                train_evaluator.run(train_loader)

            # BUGFIX: use cfg.EVERY.VALIDATE like the sibling branches;
            # cfg.DATA_LOADER carries loader settings, not event periods.
            trainer.add_event_handler(Events.EPOCH_STARTED(every=cfg.EVERY.VALIDATE), run_eval)
            trainer.add_event_handler(Events.COMPLETED, run_eval)

            evaluator_dict['train'] = train_evaluator
        elif 'train' in self.evaluators and 'validation' in self.evaluators:
            train_evaluator = self.create_evaluator(metrics=metrics, output_transform=eval_output_transform)
            validation_evaluator = self.create_evaluator(metrics=metrics, output_transform=eval_output_transform)

            def run_eval(engine):
                torch.cuda.synchronize()
                train_evaluator.run(train_loader)
                validation_evaluator.run(val_loader)

            trainer.add_event_handler(Events.EPOCH_STARTED(every=cfg.EVERY.VALIDATE), run_eval)
            trainer.add_event_handler(Events.COMPLETED, run_eval)

            evaluator_dict['train'] = train_evaluator
            evaluator_dict['validation'] = validation_evaluator

        self.config_print_logger(trainer)

        # config tensorboard logger
        self.config_tb_logger(trainer, evaluator_dict, metrics)

        # resume from checkpoint (no-op when the file does not exist)
        self.resume()

        # fire training
        trainer.run(train_loader, max_epochs=max_eppchs)

    def config_checkpoint(self, evalutor, filename_prefix="ais_landmark"):
        """Attach a periodic ModelCheckpoint handler to `evalutor`."""
        checkpoint_handler = ModelCheckpoint(
            os.path.join(cfg.OUTPUT_DIR, 'checkpoint'),
            "net",
            n_saved=10,
            filename_prefix=filename_prefix,
            create_dir=True,
            require_empty=False,
        )
        evalutor.add_event_handler(Events.EPOCH_COMPLETED(every=cfg.EVERY.CHECKPOINT),
                                   checkpoint_handler,
                                   {"model": self.model, "opt": self.optimizer})

    def config_print_logger(self, trainer):
        """Attach the console stats printer to the trainer."""
        train_stats_handler = StatsHandler(name="trainer")
        train_stats_handler.attach(trainer)

    def config_tb_logger(self, trainer, evaluator_dict: dict = None, metrics: dict = None):
        """Configure tensorboard logging for the trainer and all evaluators."""
        evaluator_dict = {} if evaluator_dict is None else evaluator_dict
        metrics = {} if metrics is None else metrics

        tb_logger = ignite_tb_logger.TensorboardLogger(cfg.OUTPUT_DIR + '/logs/')
        # keep a handle on the instance (the attribute was declared in __init__
        # but never assigned before)
        self.tb_logger = tb_logger

        # trainer: per-iteration batch loss and learning rate
        tb_logger.attach(trainer,
                         log_handler=ignite_tb_logger.OutputHandler(tag="train", metric_names=['batch loss', ]),
                         event_name=Events.ITERATION_COMPLETED)
        tb_logger.attach(trainer,
                         log_handler=ignite_tb_logger.OptimizerParamsHandler(self.optimizer, param_name='lr'),
                         event_name=Events.ITERATION_STARTED)

        # evaluators: epoch-level metrics, stepped by the trainer's epoch counter
        for tag, evaluator in evaluator_dict.items():
            tb_logger.attach(
                evaluator,
                log_handler=ignite_tb_logger.OutputHandler(
                    tag=tag,
                    metric_names=list(metrics.keys()),
                    global_step_transform=ignite_tb_logger.global_step_from_engine(trainer)),
                event_name=Events.EPOCH_COMPLETED,
            )
            ProgressBar(persist=False, desc=tag + " evaluation").attach(evaluator)


def train():
    """Entry point for the training phase: build net, loss and trainer, then run."""
    # NOTE(review): eval() on config strings assumes the yaml config is trusted.
    optimizer = eval('optim.' + cfg.SOLVER.OPTIMIZER)
    scheduler = eval('optim.lr_scheduler.' + cfg.SOLVER.SCHEDULER)
    criterion = BCEDiceLoss(eps=1e-7, activation='softmax2d')

    net = SegHighResolutionNet(cfg.MODEL.SEMANTIC_SEG.CHANNELS,
                               cfg.MODEL.SEMANTIC_SEG.NUM_CLASSES,
                               **cfg.MODEL.SEMANTIC_SEG.MODEL_PARAMETER)
    net.init_weights(cfg.MODEL.SEMANTIC_SEG.PRETRAINED)

    seg_trainer = SpineSegTrainer(net,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  lr_scheduler=scheduler,
                                  gpus=cfg.NUM_GPUS,
                                  distributed=False,
                                  evaluators=['train', 'validation'],
                                  optimizer__param=cfg.SOLVER.OPTIMIZER_PARAMETER['PARA'],
                                  scheduler__param=cfg.SOLVER.SCHEDULER_PARAMETER['PARA'],
                                  warmup__param=cfg.SOLVER.SCHEDULER_WARMUP['PARA'])
    seg_trainer.build_trainer(cfg.SOLVER.MAX_STEPS)


def test():
    """Entry point for the test phase: restore a checkpoint and visualize predictions."""
    # NOTE(review): eval() on config strings assumes the yaml config is trusted.
    optimizer = eval('optim.' + cfg.SOLVER.OPTIMIZER)
    scheduler = eval('optim.lr_scheduler.' + cfg.SOLVER.SCHEDULER)
    criterion = BCEDiceLoss()

    net = SegHighResolutionNet(cfg.MODEL.SEMANTIC_SEG.CHANNELS,
                               cfg.MODEL.SEMANTIC_SEG.NUM_CLASSES,
                               **cfg.MODEL.SEMANTIC_SEG.MODEL_PARAMETER)
    net.init_weights()

    seg_trainer = SpineSegTrainer(net,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  lr_scheduler=scheduler,
                                  gpus=cfg.NUM_GPUS,
                                  distributed=False,
                                  evaluators=['train', 'validation'],
                                  optimizer__param=cfg.SOLVER.OPTIMIZER_PARAMETER['PARA'],
                                  scheduler__param=cfg.SOLVER.SCHEDULER_PARAMETER['PARA'])
    seg_trainer.prepare_data()
    # NOTE(review): checkpoint file name is hard-coded -- consider a config key.
    seg_trainer.start_inference(cfg.OUTPUT_DIR + '/training_checkpoint_142800.pth',
                                seg_trainer.test_dataset)


def parse_args():
    """Parse the command line: a positional config path plus an optional phase.

    Returns:
        argparse.Namespace with `config` (str) and `phase` (one of
        'train', 'val', 'test'; default 'train').
    """
    parser = argparse.ArgumentParser(description='Train a DR KeyPoint Detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--phase', type=str, default='train',
                        choices=['train', 'val', 'test'])
    return parser.parse_args()


if __name__ == '__main__':
    # Fixed user-facing typos ('Wellcom', 'veriosn') and the doubled
    # not-found message (print + sys.exit printed the same text twice).
    print('Welcome to AIS landmark detection code...')
    print_torch_version()
    print('- AIS version: {}'.format(ais_version))
    args = parse_args()
    cfg_file = args.config
    phase = args.phase

    if cfg_file is not None and os.path.exists(cfg_file):
        cfg_from_file(cfg_file)
        print("load yaml: {}".format(cfg_file))
    else:
        # sys.exit already prints the message to stderr and exits non-zero.
        sys.exit('%s not found!' % (cfg_file))

    set_random_seed(cfg.RNG_SEED)

    if phase == 'train':
        train()
    elif phase == 'test':
        test()
    else:
        # argparse accepts 'val' but no handler exists yet; say so instead
        # of silently doing nothing.
        print("phase '%s' is not implemented yet" % phase)

    print('See you again!')
