import os
import random
import time
import logging
from datetime import datetime
import numpy as np
from PIL import Image
import multiprocess
import mindspore
from mindspore import nn, Tensor
from mindspore.dataset import vision
from mindspore import dataset as de
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from launcher import get_project_root


# File extensions (lower-case) that is_image_file() recognizes as images.
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.tif', '.tiff']


class ImagePool:
    """History buffer of previously generated images.

    Feeding the discriminator a mix of current and past generator outputs
    stabilizes GAN training.
    """

    def __init__(self, pool_size):
        """
        Initialize the ImagePool class
        Args:
            pool_size (int): the size of image buffer, if pool_size=0, no buffer will be created.
        """
        self.pool_size = pool_size
        if self.pool_size > 0:
            # lazily-filled buffer; num_imgs counts the occupied slots
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        """Return a batch drawn from the history buffer and/or the current batch."""
        if isinstance(images, Tensor):
            images = images.asnumpy()
        if self.pool_size == 0:
            # buffering disabled: pass the batch straight through
            return Tensor(images)
        selected = []
        for img in images:
            if self.num_imgs < self.pool_size:
                # buffer not yet full: store the incoming image and return it
                self.num_imgs += 1
                self.images.append(img)
                selected.append(img)
            elif random.uniform(0, 1) > 0.5:
                # 50% chance: return a stored image and overwrite its slot
                slot = random.randint(0, self.pool_size - 1)  # randint is inclusive
                stored = self.images[slot].copy()
                self.images[slot] = img
                selected.append(stored)
            else:
                # remaining 50%: return the current image unchanged
                selected.append(img)
        batch = np.array(selected)
        if len(batch.shape) != 4:
            raise ValueError("img should be 4d, but get shape {}".format(batch.shape))
        return Tensor(batch)


def save_image(img, img_path):
    """Write a normalized [1, C, H, W] image (Tensor or ndarray) to *img_path*.

    Args:
        img: mindspore Tensor or numpy array of shape (1, C, H, W).
        img_path (str): destination file path.

    Raises:
        ValueError: if *img* is neither a Tensor nor a numpy array.
    """
    if isinstance(img, Tensor):
        img = img.asnumpy()
    elif not isinstance(img, np.ndarray):
        raise ValueError("img should be Tensor or numpy array, but get {}".format(type(img)))
    Image.fromarray(decode_image(img)).save(img_path)


def decode_image(img):
    """Decode a [1, C, H, W] normalized array back to an H x W x C uint8 image.

    Inverts the Normalize(mean=0.5*255, std=0.5*255) transform used by the
    data pipeline, i.e. maps values from roughly [-1, 1] back to [0, 255].

    Args:
        img (numpy.ndarray): batch of one image, shape (1, C, H, W).

    Returns:
        numpy.ndarray: uint8 image of shape (H, W, C).
    """
    mean = 0.5 * 255
    std = 0.5 * 255
    # clip before the uint8 cast: without it, out-of-range floats wrap around
    # (e.g. 256.0 would become 0) instead of saturating
    decoded = np.clip(img[0] * std + mean, 0, 255)
    return decoded.astype(np.uint8).transpose((1, 2, 0))


def get_lr(lr_init, step_size, num_epoch, warm_up_epoch, n_epochs_decay):
    """Build a per-step learning-rate schedule as a float32 Tensor.

    Shape: flat at lr_init for warm_up_epoch epochs, then linear decay over
    n_epochs_decay epochs, then flat at the last decayed value for any
    remaining epochs.

    Args:
        lr_init (float): initial learning rate.
        step_size (int): steps per epoch.
        num_epoch (int): total number of epochs.
        warm_up_epoch (int): epochs held at lr_init before decay.
        n_epochs_decay (int): epochs over which the rate decays linearly.

    Returns:
        Tensor: per-step learning rates, length step_size * num_epoch.
    """
    schedule = [lr_init] * (step_size * warm_up_epoch)
    lr = 0
    for epoch in range(n_epochs_decay):
        # first decay epoch equals lr_init, last is lr_init / n_epochs_decay
        lr = lr_init * (n_epochs_decay - epoch) / n_epochs_decay
        schedule.extend([lr] * step_size)
    tail_epochs = num_epoch - n_epochs_decay - warm_up_epoch
    schedule.extend([lr] * step_size * tail_epochs)
    return Tensor(np.array(schedule).astype(np.float32))


def load_ckpt(G_A, G_B, is_load_GA, is_load_GB, is_load_DA, is_load_DB, D_A=None, D_B=None):
    """Load parameter from checkpoint.

    Each network is restored from a fixed file name under
    <project_root>/gan_checkpoints when its corresponding flag is set.
    The discriminators are only loaded when they are actually passed in.
    """
    ckpt_root = os.path.join(get_project_root(), "gan_checkpoints")

    def _load(net, fname):
        # read the checkpoint file and copy its parameters into the network
        params = load_checkpoint(os.path.join(ckpt_root, fname))
        load_param_into_net(net, params)

    if is_load_GA:
        _load(G_A, "g_a.ckpt")
    if is_load_GB:
        _load(G_B, "g_b.ckpt")
    if D_A is not None and is_load_DA:
        _load(D_A, "d_a.ckpt")
    if D_B is not None and is_load_DB:
        _load(D_B, "d_b.ckpt")


def enable_batch_statistics(net):
    """Enable batch statistics in all BatchNorms"""
    # iterative traversal of the cell tree (visit order differs from the
    # recursive original, but the set of touched BatchNorms is identical)
    pending = [net]
    while pending:
        cell = pending.pop()
        if isinstance(cell, nn.BatchNorm2d):
            cell.use_batch_statistics = True
        else:
            pending.extend(cell.cells())


class Reporter(logging.Logger):
    """Console + file logger that also tracks losses, timing, sample images
    and checkpoints for CycleGAN training/inference.

    Construction side effects: creates the log/figures/checkpoint directories
    under the project root and opens a timestamped log file.
    """

    def __init__(self, step_size, is_save_checkpoint, save_checkpoint_epochs=1):
        """
        Args:
            step_size (int): steps per epoch; used for timing and loss averages.
            is_save_checkpoint (bool): save checkpoints at epoch end.
            save_checkpoint_epochs (int): checkpoint every N epochs.
        """
        super(Reporter, self).__init__("cyclegan")
        self.log_dir = os.path.join(get_project_root(), 'log')
        self.imgs_dir = os.path.join(get_project_root(), "figures")
        self.ckpts_dir = os.path.join(get_project_root(), "gan_checkpoints")
        # exist_ok=True already handles the "directory exists" case, so the
        # previous os.path.exists pre-checks were redundant
        os.makedirs(self.log_dir, exist_ok=True)
        os.makedirs(self.imgs_dir, exist_ok=True)
        os.makedirs(self.ckpts_dir, exist_ok=True)
        self.rank = 0
        self.is_save_checkpoint = is_save_checkpoint
        self.save_checkpoint_epochs = save_checkpoint_epochs
        self.save_imgs = True
        # console handler
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter('%(message)s')
        console.setFormatter(formatter)
        self.addHandler(console)
        # file handler
        log_name = datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S') + '_rank_{}.log'.format(self.rank)
        self.log_fn = os.path.join(self.log_dir, log_name)
        fh = logging.FileHandler(self.log_fn)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.addHandler(fh)
        self.step = 0
        self.epoch = 0
        self.dataset_size = step_size
        self.print_iter = 100  # emit a progress line every 100 steps
        self.G_loss = []
        self.D_loss = []

    def info(self, msg, *args, **kwargs):
        """Log *msg* at INFO level (thin override of logging.Logger.info)."""
        if self.isEnabledFor(logging.INFO):
            self._log(logging.INFO, msg, args, **kwargs)

    def save_args(self, args):
        """Log every attribute of an argparse.Namespace-like *args* object."""
        self.info('Args:')
        args_dict = vars(args)
        for key in args_dict.keys():
            self.info('--> %s: %s', key, args_dict[key])
        self.info('')

    def epoch_start(self):
        """Reset per-epoch counters and timers; call at the start of each epoch."""
        self.step_start_time = time.time()
        self.epoch_start_time = time.time()
        self.step = 0
        self.epoch += 1
        self.G_loss = []
        self.D_loss = []

    def step_end(self, res_G, res_D):
        """print log when step end.

        Args:
            res_G: generator step outputs; items from index 2 onward are
                scalar loss Tensors (order assumed: G total, G_A, G_B, C_A,
                C_B, idt_A, idt_B — TODO confirm against the training cell).
            res_D: scalar discriminator loss Tensor.
        """
        self.step += 1
        loss_D = float(res_D.asnumpy())
        res = []
        for item in res_G[2:]:
            res.append(float(item.asnumpy()))
        self.G_loss.append(res[0])
        self.D_loss.append(loss_D)
        if self.step % self.print_iter == 0:
            step_cost = (time.time() - self.step_start_time) * 1000 / self.print_iter
            # fixed: full-width colon after "loss_idt_B", missing space after
            # "D_loss:" and at the implicit string-concatenation boundary
            losses = "G_loss: {:.2f}, D_loss: {:.2f}, loss_G_A: {:.2f}, loss_G_B: {:.2f}, loss_C_A: {:.2f}, "\
                     "loss_C_B: {:.2f}, loss_idt_A: {:.2f}, loss_idt_B: {:.2f}".format(
                         res[0], loss_D, res[1], res[2], res[3], res[4], res[5], res[6])
            self.info("Epoch[{}] [{}/{}] step cost: {:.2f} ms, {}".format(
                self.epoch, self.step, self.dataset_size, step_cost, losses))
            self.step_start_time = time.time()

    def epoch_end(self, net):
        """print log and save checkpoints when epoch end."""
        epoch_cost = (time.time() - self.epoch_start_time) * 1000
        per_step_time = epoch_cost / self.dataset_size
        # NOTE: averages divide by the full dataset size, so a partially-run
        # epoch under-reports the mean losses
        mean_loss_G = sum(self.G_loss) / self.dataset_size
        mean_loss_D = sum(self.D_loss) / self.dataset_size
        self.info("Epoch [{}] total cost: {:.2f} ms, per step: {:.2f} ms, G_loss: {:.2f}, D_loss: {:.2f}".format(
            self.epoch, epoch_cost, per_step_time, mean_loss_G, mean_loss_D))

        if self.is_save_checkpoint and self.epoch % self.save_checkpoint_epochs == 0:
            mindspore.save_checkpoint(net.G.generator.G_A, os.path.join(self.ckpts_dir, f"G_A_{self.epoch}.ckpt"))
            mindspore.save_checkpoint(net.G.generator.G_B, os.path.join(self.ckpts_dir, f"G_B_{self.epoch}.ckpt"))
            mindspore.save_checkpoint(net.G.D_A, os.path.join(self.ckpts_dir, f"D_A_{self.epoch}.ckpt"))
            mindspore.save_checkpoint(net.G.D_B, os.path.join(self.ckpts_dir, f"D_B_{self.epoch}.ckpt"))
            # keep only the 20 most recently modified files, remove outdated ckpts
            ckpt_files = [os.path.join(self.ckpts_dir, file) for file in os.listdir(self.ckpts_dir)]
            ckpt_files = sorted(ckpt_files, key=os.path.getmtime)
            for outdated in ckpt_files[:-20]:
                os.remove(outdated)

    def visualizer(self, img_A, img_B, fake_A, fake_B):
        """Save one sample quadruple (real/fake from both domains) per epoch."""
        if self.save_imgs and self.step % self.dataset_size == 0:
            save_image(img_A, os.path.join(self.imgs_dir, f"{self.epoch}_img_A.jpg"))
            save_image(img_B, os.path.join(self.imgs_dir, f"{self.epoch}_img_B.jpg"))
            save_image(fake_A, os.path.join(self.imgs_dir, f"{self.epoch}_fake_A.jpg"))
            save_image(fake_B, os.path.join(self.imgs_dir, f"{self.epoch}_fake_B.jpg"))

    def start_predict(self, direction):
        """Mark the start of a prediction pass in the given *direction*."""
        self.predict_start_time = time.time()
        self.direction = direction
        self.info('==========start predict %s===============', self.direction)

    def end_predict(self):
        """Log total and per-image prediction time."""
        cost = (time.time() - self.predict_start_time) * 1000
        per_step_cost = cost / self.dataset_size
        # fixed: the per-image figure was missing its "ms" unit
        self.info('total {} imgs cost {:.2f} ms, per img cost {:.2f} ms'.format(self.dataset_size, cost, per_step_cost))
        self.info('==========end predict %s===============\n', self.direction)


class DistributedSampler:
    """Distributed sampler.

    Splits dataset indices across *num_replicas* devices so that every
    replica receives the same number of samples per epoch.
    """

    def __init__(self, dataset_size, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            print("***********Setting world_size to 1 since it is not passed in ******************")
            num_replicas = 1
        if rank is None:
            print("***********Setting rank to 0 since it is not passed in ******************")
            rank = 0
        self.dataset_size = dataset_size
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        # per-replica sample count, rounded up; padding fills the remainder
        self.num_samples = int(np.ceil(dataset_size * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        """Yield this replica's indices for one epoch."""
        if self.shuffle:
            # seed with the epoch counter so every replica shuffles identically,
            # while successive epochs see different orders
            order = np.random.RandomState(seed=self.epoch).permutation(self.dataset_size).tolist()
            self.epoch += 1
        else:
            order = list(range(self.dataset_size))

        # pad by repeating the head so total_size divides evenly across replicas
        order += order[:(self.total_size - len(order))]
        assert len(order) == self.total_size

        # strided subsample: replica r takes indices r, r+R, r+2R, ...
        mine = order[self.rank:self.total_size:self.num_replicas]
        assert len(mine) == self.num_samples
        return iter(mine)

    def __len__(self):
        return self.num_samples

def is_image_file(filename):
    """Judge whether it is a picture (case-insensitive extension check)."""
    # str.endswith accepts a tuple of suffixes: one C-level call instead of
    # a Python-level scan over IMG_EXTENSIONS
    return filename.lower().endswith(tuple(IMG_EXTENSIONS))


def make_dataset(dir_path, max_dataset_size=float("inf")):
    """Recursively collect image file paths under *dir_path*.

    Args:
        dir_path (str): directory to scan.
        max_dataset_size (int or float): cap on the number of paths returned;
            the default of infinity means "no cap".

    Returns:
        list[str]: image paths in deterministic (sorted os.walk) order.

    Raises:
        ValueError: if *dir_path* is not a directory.
    """
    # raise instead of assert: asserts are silently stripped under `python -O`
    if not os.path.isdir(dir_path):
        raise ValueError('%s is not a valid directory' % dir_path)

    images = []
    for root, _, fnames in sorted(os.walk(dir_path)):
        for fname in fnames:
            if is_image_file(fname):
                images.append(os.path.join(root, fname))
    return images[:min(max_dataset_size, len(images))]


class UnalignedDataset:
    """Unpaired two-domain image dataset (CycleGAN style).

    Yields (A, B) pairs of RGB numpy arrays where A and B are drawn
    independently from the two domain folders.
    """

    def __init__(self, dataroot, is_train, max_dataset_size=float("inf"), use_random=True):
        # NOTE(review): in the original code the is_train and eval branches
        # were byte-identical — both read valA/valB. Behavior is preserved
        # here; confirm whether trainA/trainB was intended for training.
        self.dir_A = os.path.join(dataroot, 'valA')
        self.dir_B = os.path.join(dataroot, 'valB')

        self.A_paths = sorted(make_dataset(self.dir_A, max_dataset_size))  # images from '/path/to/data/valA'
        self.B_paths = sorted(make_dataset(self.dir_B, max_dataset_size))  # images from '/path/to/data/valB'
        self.A_size = len(self.A_paths)  # size of dataset A
        self.B_size = len(self.B_paths)  # size of dataset B
        self.use_random = use_random

    def __getitem__(self, index):
        """Return one unpaired sample.

        Parameters:
            index (int) -- a sequential/random integer for data indexing

        Returns:
            tuple(numpy.ndarray, numpy.ndarray): RGB arrays (A_img, B_img).
        """
        index_B = index % self.B_size
        if self.use_random and index % max(self.A_size, self.B_size) == 0:
            # once per pass: reshuffle domain A and decouple B's index so
            # the pairing between the domains changes
            random.shuffle(self.A_paths)
            index_B = random.randint(0, self.B_size - 1)
        path_A = self.A_paths[index % self.A_size]
        path_B = self.B_paths[index_B]
        image_A = np.array(Image.open(path_A).convert('RGB'))
        image_B = np.array(Image.open(path_B).convert('RGB'))

        return image_A, image_B

    def __len__(self):
        """Return the size of the larger of the two domains."""
        return max(self.A_size, self.B_size)


class ImageFolderDataset:
    """Flat image-folder dataset, yielding (rgb_array, filename) pairs."""

    def __init__(self, dataroot, max_dataset_size=float("inf")):
        self.dataroot = dataroot
        # deterministic order: sorted list of image paths under dataroot
        self.paths = sorted(make_dataset(dataroot, max_dataset_size))
        self.size = len(self.paths)

    def __getitem__(self, index):
        """Return the image at *index* (wrapping) and its base filename."""
        path = self.paths[index % self.size]
        image = np.array(Image.open(path).convert('RGB'))
        return image, os.path.split(path)[1]

    def __len__(self):
        """Return the total number of images in the dataset."""
        return self.size


def create_dataset(data_path, is_train: bool, batch_size: int, device_num: int, shuffle: bool, rank=None, image_size=512, use_random=False):
    """Build a MindSpore dataset for CycleGAN training or inference.

    Args:
        data_path (str): parent directory of the domain folders (the level
            above trainA/trainB, or valA/valB).
        is_train (bool): if True, build the unaligned two-domain training
            pipeline; otherwise a single-folder inference pipeline (batch 1).
        batch_size (int): training batch size (ignored for inference).
        device_num (int): number of devices for distributed sampling.
        shuffle (bool): whether the training sampler shuffles.
        rank (int): this device's rank, or None for single-device.
        image_size (int): output height/width after resize or crop.
        use_random (bool): apply random crop/flip augmentation when training.

    Returns:
        The mapped and batched MindSpore dataset.
    """
    dataroot = data_path
    # os.cpu_count() is stdlib, removing the third-party `multiprocess`
    # dependency; it may return None, and the worker count must never be 0
    cores = os.cpu_count() or 1
    num_parallel_workers = max(1, min(8, cores // device_num))
    mean = [0.5 * 255] * 3
    std = [0.5 * 255] * 3
    if is_train:
        dataset = UnalignedDataset(dataroot, is_train, use_random=use_random)
        distributed_sampler = DistributedSampler(len(dataset), device_num, rank, shuffle=shuffle)
        ds = de.GeneratorDataset(dataset, column_names=["image_A", "image_B"],
                                 sampler=distributed_sampler, num_parallel_workers=num_parallel_workers)
        if use_random:
            # augmentation: random scale/crop plus horizontal flip
            trans = [
                vision.RandomResizedCrop(image_size, scale=(0.5, 1.0), ratio=(0.75, 1.333)),
                vision.RandomHorizontalFlip(prob=0.5),
                vision.Normalize(mean=mean, std=std),
                vision.HWC2CHW()
            ]
        else:
            trans = [
                vision.Resize((image_size, image_size)),
                vision.Normalize(mean=mean, std=std),
                vision.HWC2CHW()
            ]
        ds = ds.map(operations=trans, input_columns=["image_A"], num_parallel_workers=num_parallel_workers)
        ds = ds.map(operations=trans, input_columns=["image_B"], num_parallel_workers=num_parallel_workers)
        ds = ds.batch(batch_size, drop_remainder=True)
    else:
        # inference reads dataroot directly (the former os.path.join(dataroot)
        # was a no-op)
        dataset = ImageFolderDataset(dataroot)
        ds = de.GeneratorDataset(dataset, column_names=["image", "image_name"],
                                 num_parallel_workers=num_parallel_workers)
        trans = [
            vision.Resize((image_size, image_size)),
            vision.Normalize(mean=mean, std=std),
            vision.HWC2CHW()
        ]
        ds = ds.map(operations=trans, input_columns=["image"], num_parallel_workers=num_parallel_workers)
        ds = ds.batch(1, drop_remainder=True)
    return ds


# Public API of this module for `from <module> import *`.
__all__ = ["create_dataset",
           "load_ckpt",
           "Reporter",
           "save_image"]
