# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name： concat_images_tensor
Description :
Author : 'li'
date： 2022/7/6
Change Activity:
2022/7/6:
-------------------------------------------------
"""

import torch
from torch import Tensor


class ConcatImageTensor:
    """
    Pad a collection of images to a common (height, width) and stack them
    into one batch tensor plus a boolean validity mask.

    The mask is True over the region occupied by the original image and
    False over the zero-padded area (see _build_images_tensor).
    """

    def __init__(self, images, masks=None):
        """
        Args:
            images: list of tensors with shape (c, h, w) or (h, w), or a
                single tensor with shape (b, c, h, w).
            masks: optional pre-built mask tensor. When provided, ``images``
                is used as-is and no padding/stacking is performed.
        """
        self.images = images
        self.images_tensor = images
        self.mask_tensor = masks
        self.batch_size = self.images.shape[0] if isinstance(self.images, Tensor) else len(self.images)
        self.height, self.width, self.channel_num = self._fetch_tensor_shape()
        if masks is None:
            self.build_from_list()

    def copy(self):
        """Return a new ConcatImageTensor rebuilt from the raw images."""
        return ConcatImageTensor(self.images)

    def build_from_list(self):
        """(Re)build the stacked image tensor and mask from ``self.images_tensor``."""
        self.images_tensor, self.mask_tensor = self._build_images_tensor()

    def to_device(self, device):
        """Move both the image tensor and the mask tensor to ``device``."""
        self.images_tensor = self.images_tensor.to(device)
        self.mask_tensor = self.mask_tensor.to(device)

    def _fetch_tensor_shape(self):
        """
        Scan all images and determine the common target shape.

        Returns:
            tuple: (max_height, max_width, channel_count).

        Raises:
            Exception: if the images disagree on channel count.
        """
        h_max, w_max, channels_number = 0, 0, 0
        for image in self.images_tensor:
            shape = image.shape
            if len(shape) == 2:
                # BUG FIX: a 2-D (h, w) image must gain its channel axis at
                # dim 0 to become (1, h, w). The original used dim=2, which
                # produced (h, w, 1) and corrupted the c/h/w unpacking below.
                shape = torch.unsqueeze(image, dim=0).shape
            c, h, w = shape
            h_max = max(h_max, h)
            w_max = max(w_max, w)
            if channels_number == 0 and c != 0:
                channels_number = c
            if channels_number != 0 and c != channels_number:
                raise Exception('Images must have same channels number.')
        return h_max, w_max, channels_number

    def _build_images_tensor(self):
        """
        Pad every image to (self.height, self.width) and stack the results.

        Returns:
            tuple: images of shape (b, c, H, W) and boolean masks of shape
            (b, H, W); the mask is True on valid (non-padded) pixels.
        """
        padding_img_lst = []
        mask_lst = []
        for img in self.images_tensor:
            if img.dim() == 2:
                # Keep 2-D (h, w) inputs consistent with _fetch_tensor_shape:
                # promote them to (1, h, w) so the unpack below works.
                img = torch.unsqueeze(img, dim=0)
            h, w = img.shape[1:]
            padding_h = self.height - h
            padding_w = self.width - w
            # F.pad order is (left, right, top, bottom): pad right and bottom only.
            padding_img = torch.nn.functional.pad(img, (0, padding_w, 0, padding_h))
            padding_img_lst.append(padding_img)
            mask = torch.zeros((self.height, self.width), dtype=torch.bool)
            mask[:h, :w] = True
            mask_lst.append(mask)
        return torch.stack(padding_img_lst), torch.stack(mask_lst)

    def decompose(self):
        """Return (images_tensor, mask_tensor)."""
        return self.images_tensor, self.mask_tensor