from pathlib import Path
from time import strftime
import os.path as ops
import torch
from utils import get_logger, get_dist_info, mkdir


class Exp_Basic(object):
    """Base class for an experiment: prepares output directories, device,
    logger, and the training environment, then delegates construction of the
    model, dataloaders and solver to subclass hooks.

    Subclasses must implement ``init_model``, ``init_dataloader``,
    ``init_solver``, ``train_epoch``, ``vali_epoch``, ``_save_checkpoint``
    and ``_load_checkpoint``.

    Args:
        cfg: configuration object supporting both attribute access
            (``cfg.model_name``) and dict-style ``cfg.get(key, default)``.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.model_name = cfg.model_name
        # Default output dir is timestamped so repeated runs do not collide.
        output_dir = cfg.get("output_dir")
        if output_dir is None:
            output_dir = f"./output/{self.model_name}/{strftime('%m%d_%H%M')}"
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Temp subdirectory holding intermediate state saved for resuming.
        self.temp_dir = self.output_dir / "temp"
        self.temp_dir.mkdir(parents=True, exist_ok=True)

        self.device = self._acquire_device()
        # NOTE(review): logger name "paddlevideo" looks inherited from a
        # PaddleVideo port — confirm it matches the project's logging config.
        self.logger = get_logger("paddlevideo")
        # 1. Construct environment (AMP, gradient accumulation).
        self.init_environment()
        # 2. Construct model.
        self.init_model()
        # 3. Construct dataset and dataloader for training and evaluation.
        self.init_dataloader()
        # 4. Construct learning rate scheduler (lr) and optimizer.
        self.init_solver()

    def _acquire_device(self):
        """Select the torch device for this process.

        Returns:
            torch.device: the per-rank CUDA device when running distributed,
            otherwise CUDA if available, else CPU.
        """
        world_size, rank = get_dist_info()
        # NOTE(review): assumes get_dist_info() reports world_size == -1 when
        # NOT running distributed — confirm against utils.get_dist_info.
        if world_size != -1:  # DDP: bind this process to its rank's GPU
            device = torch.device("cuda", rank)
        else:  # single process: prefer GPU, fall back to CPU
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        return device

    def init_environment(self):
        """Configure AMP and (single-GPU only) gradient accumulation from cfg.

        Sets ``self.amp`` (and ``self.scaler`` when AMP is enabled) and
        ``self.use_gradient_accumulation``; writes the derived
        ``num_iters`` back into ``cfg.GRADIENT_ACCUMULATION``.
        """
        # Automatic mixed precision: only allocate a GradScaler when enabled.
        self.amp = self.cfg.get('amp', False)
        if self.amp:
            self.scaler = torch.cuda.amp.GradScaler()

        batch_size = self.cfg.DATASET.get('batch_size', 8)
        self.use_gradient_accumulation = self.cfg.get('GRADIENT_ACCUMULATION', None)

        # Gradient accumulation only takes effect for single-GPU training.
        if self.use_gradient_accumulation and not torch.distributed.is_initialized():
            global_batch_size = self.cfg.GRADIENT_ACCUMULATION.get(
                'global_batch_size', None)

            assert isinstance(
                global_batch_size, int
            ), f"global_batch_size must be int, but got {type(global_batch_size)}"
            # Interpolate both values so the failure message is actionable
            # (the original f-string had no placeholders).
            assert batch_size < global_batch_size, (
                f"global_batch_size ({global_batch_size}) must be bigger "
                f"than batch_size ({batch_size})")
            # Number of micro-batch iterations needed to reach the global
            # batch size (floor division: any remainder is dropped).
            self.cfg.GRADIENT_ACCUMULATION["num_iters"] = global_batch_size // batch_size
            self.logger.info(
                f"Using gradient accumulation training strategy, "
                f"global_batch_size={global_batch_size}, "
                f"num_accumulative_iters={self.cfg.GRADIENT_ACCUMULATION.num_iters}")

    def init_model(self):
        """Build the model; must be implemented by subclasses."""
        raise NotImplementedError

    def init_dataloader(self):
        """Build train/eval datasets and dataloaders; subclass hook."""
        raise NotImplementedError

    def init_solver(self):
        """Build the learning-rate scheduler and optimizer; subclass hook."""
        raise NotImplementedError

    def vali_epoch(self):
        """Run one validation epoch; subclass hook."""
        raise NotImplementedError

    def train_epoch(self):
        """Run one training epoch; subclass hook."""
        raise NotImplementedError

    def _save_checkpoint(self):
        """Persist training state for later resumption; subclass hook."""
        raise NotImplementedError

    def _load_checkpoint(self):
        """Restore previously saved training state; subclass hook."""
        raise NotImplementedError