import torch
import logging
import time
from torch.nn import DataParallel
from tqdm import tqdm
import numpy as np

from model import build_model
from .optimizer import build_optimizer
from .scheduler import build_scheduler
from data.loader import build_train_loader, build_test_loader
from data.data_mapper import DatasetMapper, MultiScaleDatasetMapper
from data.utils import move_to
from .checkpoint import CheckPointer
from .tensorboard_logging import TensorboardWriter
#from utils.parallel import convert_to_sync_batchnorm
#from utils.sync_batchnorm import patch_replication_callback
from utils.exception import EarlyStop
from utils.boxes import resize_padding, invert_resize, clip_boxes

class Trainer:
    """Iteration-based training driver.

    Builds the model, optimizer, scheduler and data loaders from ``cfg`` and
    runs a step loop with periodic console/tensorboard reporting,
    checkpointing, validation, early stopping and LR scheduling.  The train
    loader is wrapped with ``iter()`` and is assumed to yield batches
    indefinitely (TODO confirm the sampler is infinite; otherwise
    ``run_step`` raises StopIteration before MAX_ITER is reached).
    """

    def __init__(self, cfg):
        self.logger = logging.getLogger(__name__)
        self.model = self.build_model(cfg)
        self.devices = cfg.MODEL.DEVICES

        if len(self.devices) == 1:
            self.model.to(self.devices[0])
        else:
            # Multi-GPU: extract the integer index from each "cuda:N" entry;
            # the master replica lives on the first configured device.
            device_ids = [int(device[5:]) for device in self.devices if device.startswith("cuda:")]
            #self.model.apply(convert_to_sync_batchnorm)
            self.model.to(self.devices[0])
            self.model = DataParallel(self.model, device_ids=device_ids)
            #patch_replication_callback(self.model)

        self.optimizer = self.build_optimizer(cfg, self.model)
        self.scheduler = self.build_scheduler(cfg, self.optimizer)
        self.train_loader = iter(self.build_train_loader(cfg))
        self.test_loader = self.build_test_loader(cfg)
        self.start_iter = 0
        self.max_iter = cfg.OPTIMIZER.MAX_ITER

        self.report_iter_interval = cfg.OPTIMIZER.REPORT_INTERVAL
        self.checkpoint_save_interval = cfg.OPTIMIZER.SAVE_INTERVAL

        self.checkpointer = CheckPointer(cfg.OUTPUT_DIR, device=self.devices[0])

        self.evaluate_interval = cfg.OPTIMIZER.EVALUATE_INTERVAL
        self.best_iter = None
        self.best_loss = None
        self.early_stop_patience = cfg.OPTIMIZER.EARLY_STOP_PATIENCE

        self.scheduler_interval = cfg.SCHEDULER.INTERVAL
        # Running mean of the training loss handed to scheduler.step();
        # reset every scheduler_interval iterations.
        self.scheduler_loss = 0
        self.scheduler_iter = 0

        self.tensorboard_writer = TensorboardWriter(cfg)

    def resume_or_load(self, resume=True):
        """Resume model/optimizer/scheduler state from the latest checkpoint,
        if one exists; otherwise keep training from scratch."""
        if resume and self.checkpointer.exists():
            self.start_iter = self.checkpointer.load(self.model, optimizer=self.optimizer, scheduler=self.scheduler)

    def train(self):
        """Run the loop from start_iter to max_iter.

        KeyboardInterrupt and EarlyStop both save a checkpoint before the
        method returns; any other exception propagates after the final log line.
        """
        self.logger.info("[begin] Starting training from iter {}".format(self.start_iter))
        self.iter = self.start_iter
        # BUGFIX: train_step_done was previously assigned only inside the
        # loop body, so an interrupt raised before the first iteration
        # started caused a NameError in the except handler below.
        train_step_done = False
        try:
            for self.iter in range(self.start_iter, self.max_iter):
                train_step_done = False
                self.before_step()
                self.run_step()
                train_step_done = True
                self.after_step()
        except KeyboardInterrupt:
            self.logger.info("[end] Stop by user.")
            # An interrupt mid-step leaves the current iteration incomplete,
            # so record the previous one in that case.
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=(self.iter if train_step_done else self.iter - 1))
        except EarlyStop as e:
            self.logger.info(str(e))
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iter)
        finally:
            self.logger.info("[end] Terminate training at iter {}".format(self.iter))

    def before_step(self):
        # Start a fresh report window whenever a new reporting interval
        # begins (the second clause also covers the first iter after resume).
        if (self.iter - self.start_iter) % self.report_iter_interval == 0 or self.iter == self.start_iter:
            self.report_dict = {}

    def run_step(self):
        """Run one optimization step and update the running statistics."""
        self.model.train()

        start_time = time.time()
        data = next(self.train_loader)
        # DataParallel scatters the batch to devices itself; only move it
        # manually in the single-device case.
        if not isinstance(self.model, DataParallel):
            data = move_to(data, device=self.devices[0])

        self.optimizer.zero_grad()

        loss_dict, _ = self.model(data)
        # .mean() folds the per-replica values produced by DataParallel.
        loss_dict = {k: v.mean() for k, v in loss_dict.items()}
        loss = sum(loss_dict.values())

        loss.backward()
        self.optimizer.step()

        cost_time = time.time() - start_time

        # Incremental mean over the current reporting window.
        report_iter = (self.iter - self.start_iter) % self.report_iter_interval
        self.report_dict['cost_time'] = self.report_dict.get('cost_time', 0.0) + cost_time
        self.report_dict['total_loss'] = (self.report_dict.get('total_loss', 0.0) * report_iter + loss.item()) / (report_iter + 1)
        for k, v in loss_dict.items():
            self.report_dict[k] = (self.report_dict.get(k, 0.0) * report_iter + v.item()) / (report_iter + 1)

        # BUGFIX: this running mean previously used "+=", which added the new
        # mean onto the old value and inflated the metric fed to
        # scheduler.step(); a plain assignment is the correct update (compare
        # the report_dict updates above).
        self.scheduler_loss = (loss.item() + self.scheduler_loss * self.scheduler_iter) / (self.scheduler_iter + 1)
        self.scheduler_iter += 1

    def after_step(self):
        """Reporting, checkpointing, evaluation/early-stop and LR stepping."""
        if (self.iter - self.start_iter + 1) % self.report_iter_interval == 0 or self.iter == self.max_iter - 1:
            self.logger.info("[iter {}]  ".format(self.iter) + "    ".join(["{} : {}".format(key, value) for key, value in self.report_dict.items()]))
            self.tensorboard_writer.add_dict(self.report_dict, self.iter, prefix='train_')

        if (self.iter + 1) % self.checkpoint_save_interval == 0 or self.iter == self.max_iter - 1:
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iter)

        if self.test_loader is not None:
            if self.evaluate_interval > 0 and (self.iter + 1) % self.evaluate_interval == 0:
                val_loss = self.evaluate()
                if self.best_loss is None or self.best_loss > val_loss:
                    self.best_loss = val_loss
                    self.best_iter = self.iter
                    self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iter, checkpoint_name='model_best.pth')

            # Stop once no improvement has been seen for more than
            # early_stop_patience evaluation rounds (patience <= 0 disables).
            if self.best_iter is not None and (self.iter - self.best_iter) / self.evaluate_interval > self.early_stop_patience > 0:
                raise EarlyStop("[end] early stop at iter {}, best iter: {}, best loss: {}".format(self.iter, self.best_iter, self.best_loss))

        if (self.iter + 1) % self.scheduler_interval == 0:
            # The scheduler consumes the running mean loss (ReduceLROnPlateau
            # style); the accumulator is then reset for the next window.
            self.scheduler.step(self.scheduler_loss)
            self.scheduler_loss = 0
            self.scheduler_iter = 0

    def evaluate(self):
        """Average the losses over the whole test loader.

        Returns:
            Mean total validation loss, or ``inf`` when the loader is empty.
        """
        self.model.eval()
        metric_dict = {
            "total_loss": 0,
        }
        start_time = time.time()
        data_length = 0
        with torch.no_grad():
            for data in tqdm(self.test_loader):
                if not isinstance(self.model, DataParallel):
                    data = move_to(data, self.devices[0])

                data_length += 1

                loss_dict, _ = self.model(data)
                loss_dict = {k: v.mean() for k, v in loss_dict.items()}
                loss = sum(loss_dict.values())

                metric_dict['total_loss'] += loss.item()
                for k, v in loss_dict.items():
                    metric_dict[k] = metric_dict.get(k, 0.0) + v.item()

        # BUGFIX: an empty loader previously raised ZeroDivisionError, and
        # only keys present in the *last* batch's loss_dict were normalised;
        # now every accumulated key is divided by the batch count.
        if data_length == 0:
            self.logger.warning("[evaluate iter {}] test loader yielded no batches".format(self.iter))
            return float('inf')
        for k in metric_dict:
            metric_dict[k] /= data_length
        cost_time = time.time() - start_time
        metric_dict['cost_time'] = cost_time

        self.logger.info("[evaluate iter {}]  ".format(self.iter) + "    ".join(["{} : {}".format(key, value) for key, value in metric_dict.items()]))
        self.tensorboard_writer.add_dict(metric_dict, self.iter, prefix='val_')

        return metric_dict['total_loss']

    @classmethod
    def build_model(cls, cfg):
        """Construct the model from config (overridable hook)."""
        return build_model(cfg)

    @classmethod
    def build_optimizer(cls, cfg, model):
        """Construct the optimizer for ``model`` (overridable hook)."""
        return build_optimizer(cfg, model)

    @classmethod
    def build_scheduler(cls, cfg, optimizer):
        """Construct the LR scheduler (overridable hook)."""
        return build_scheduler(cfg, optimizer)

    @classmethod
    def build_train_loader(cls, cfg):
        """Build the training loader with the standard dataset mapper."""
        assert len(cfg.DATASETS.TRAIN) > 0
        return build_train_loader(cfg, mapper=DatasetMapper(cfg, is_train=True))

    @classmethod
    def build_test_loader(cls, cfg):
        """Build the test loader, or None when no test set is configured."""
        if len(cfg.DATASETS.TEST) > 0:
            return build_test_loader(cfg, mapper=DatasetMapper(cfg, is_train=False))
        else:
            return None

class MultiScaleTrainer(Trainer):
    """Trainer variant whose training batches come from the multi-scale
    dataset mapper instead of the fixed-scale one."""

    @classmethod
    def build_train_loader(cls, cfg):
        """Build the training loader with a MultiScaleDatasetMapper."""
        assert len(cfg.DATASETS.TRAIN) > 0
        mapper = MultiScaleDatasetMapper(cfg, is_train=True)
        return build_train_loader(cfg, mapper=mapper)


class Predictor:
    """Inference wrapper: restores weights, resizes/pads input images to the
    configured model size, runs the network, and maps the predicted boxes
    back to each image's original resolution."""

    def __init__(self, cfg):
        self.model = self.build_model(cfg)
        self.devices = cfg.MODEL.DEVICES
        self.input_size = cfg.MODEL.INPUT.SIZE

        # Restore weights (an explicit cfg path if given, otherwise the
        # checkpointer's default) before placing the model on its device.
        checkpointer = CheckPointer(cfg.OUTPUT_DIR, device=self.devices[0])
        checkpointer.load(self.model, path=cfg.MODEL.get('WEIGHTS', None))

        self.model.to(self.devices[0])

        if len(self.devices) > 1:
            gpu_indices = [int(d[5:]) for d in self.devices if d.startswith("cuda:")]
            self.model = DataParallel(self.model, device_ids=gpu_indices)

        self.model.eval()

    def __call__(self, images):
        """Run inference on a list of HxW(xC) numpy images.

        Returns the model's output dict with 'bboxes' rescaled to the
        original image coordinates and clipped (together with
        'classification') to each image's bounds.
        """
        # Remember the original sizes so predictions can be mapped back.
        original_sizes = [{"height": img.shape[0], "width": img.shape[1]} for img in images]

        resized_pairs = [resize_padding(img, size=self.input_size) for img in images]
        images, resize_params_list = zip(*resized_pairs)

        # Give grayscale images a trailing channel axis, then assemble an
        # NCHW tensor batch.
        channelled = [np.expand_dims(img, -1) if img.ndim == 2 else img for img in images]
        batch = torch.from_numpy(np.stack(channelled, axis=0).transpose(0, 3, 1, 2))

        with torch.no_grad():
            data = {'image': batch}
            # DataParallel handles device placement itself.
            if not isinstance(self.model, DataParallel):
                data = move_to(data, self.devices[0])

            _, data_dict = self.model(data)

            data_dict['bboxes'] = [
                invert_resize(boxes, params)
                for boxes, params in zip(data_dict['bboxes'], resize_params_list)
            ]
            clipped = [
                clip_boxes(boxes, scores, size)
                for boxes, scores, size in zip(data_dict['bboxes'], data_dict['classification'], original_sizes)
            ]
            data_dict['bboxes'], data_dict['classification'] = zip(*clipped)

        return data_dict

    @classmethod
    def build_model(cls, cfg):
        """Construct the model from config (overridable hook)."""
        return build_model(cfg)

class TrainerWithSimpleScheduler(Trainer):
    """Trainer whose scheduler.step() takes no metric argument (fixed-step
    schedulers), unlike the base class which feeds the running mean loss."""

    def after_step(self):
        """Post-iteration bookkeeping: report, checkpoint, evaluate with
        early stopping, and step the scheduler without a loss metric."""
        steps_done = self.iter + 1
        is_last_iter = self.iter == self.max_iter - 1

        if is_last_iter or (self.iter - self.start_iter + 1) % self.report_iter_interval == 0:
            summary = "    ".join("{} : {}".format(key, value) for key, value in self.report_dict.items())
            self.logger.info("[iter {}]  ".format(self.iter) + summary)
            self.tensorboard_writer.add_dict(self.report_dict, self.iter, prefix='train_')

        if is_last_iter or steps_done % self.checkpoint_save_interval == 0:
            self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iter)

        if self.test_loader is not None:
            if self.evaluate_interval > 0 and steps_done % self.evaluate_interval == 0:
                val_loss = self.evaluate()
                if self.best_loss is None or val_loss < self.best_loss:
                    self.best_loss = val_loss
                    self.best_iter = self.iter
                    self.checkpointer.save(self.model, optimizer=self.optimizer, scheduler=self.scheduler, iteration=self.iter, checkpoint_name='model_best.pth')

            # Abort once the best iteration is more than early_stop_patience
            # evaluation rounds behind (patience <= 0 disables the check).
            if self.best_iter is not None and (self.iter - self.best_iter) / self.evaluate_interval > self.early_stop_patience > 0:
                raise EarlyStop("[end] early stop at iter {}, best iter: {}, best loss: {}".format(self.iter, self.best_iter, self.best_loss))

        if steps_done % self.scheduler_interval == 0:
            # Plain schedulers take no metric; the running-loss accumulators
            # are still reset to stay consistent with the base class.
            self.scheduler.step()
            self.scheduler_loss = 0
            self.scheduler_iter = 0
