import cv2
import numpy as np
import torch
import torch.nn.functional as F

from pathlib import Path
from PIL import Image
from einops import rearrange
from torch.utils.data import Dataset
from torchvision import transforms as T
from functools import partial

from sgm.utils import pair


def identity(t, *args, **kwargs):
    """Return *t* unchanged, ignoring any extra positional/keyword arguments.

    Used as a drop-in no-op wherever a transform callable is expected.
    """
    return t


def crop_center(img, crop_x, crop_y):
    """Center-crop an (x, y, c) array to (crop_x, crop_y, c).

    If the array is smaller than the requested size along an axis, it is
    first zero-padded symmetrically along that axis so the crop always
    succeeds.
    """
    size_x, size_y, _ = img.shape

    if size_x < crop_x:
        # Too small along axis 0: pad evenly, then take everything.
        before = (crop_x - size_x) // 2
        after = crop_x - size_x - before
        img = np.pad(img, ((before, after), (0, 0), (0, 0)))
        x0 = 0
    else:
        x0 = size_x // 2 - crop_x // 2

    if size_y < crop_y:
        # Too small along axis 1: pad evenly, then take everything.
        before = (crop_y - size_y) // 2
        after = crop_y - size_y - before
        img = np.pad(img, ((0, 0), (before, after), (0, 0)))
        y0 = 0
    else:
        y0 = size_y // 2 - crop_y // 2

    return img[x0:x0 + crop_x, y0:y0 + crop_y, :]


# Maps a channel count to the corresponding PIL image mode string,
# used when converting animated-image frames in seek_all_images.
CHANNELS_TO_MODE = {
    1: 'L',
    3: 'RGB',
    4: 'RGBA'
}


def seek_all_images(img, channels=3):
    """Yield every frame of a (possibly animated) PIL image.

    Each frame is converted to the mode matching *channels*
    (1 -> 'L', 3 -> 'RGB', 4 -> 'RGBA').
    """
    assert channels in CHANNELS_TO_MODE, f'channels {channels} invalid'
    mode = CHANNELS_TO_MODE[channels]

    frame_index = 0
    while True:
        try:
            # seek raises EOFError once we run past the last frame.
            img.seek(frame_index)
        except EOFError:
            return
        yield img.convert(mode)
        frame_index += 1


def video_tensor_to_gif(
        tensor,
        path,
        duration=120,
        loop=0,
        optimize=True
):
    """Save a video tensor as an animated GIF.

    Args:
        tensor: video tensor laid out so that ``tensor.unbind(dim=1)``
            yields one frame tensor per time step (e.g. ``(c, f, h, w)``).
        path: destination file path.
        duration: per-frame display time in milliseconds.
        loop: GIF loop count (0 = loop forever).
        optimize: whether PIL should optimize the GIF palette.

    Returns:
        The list of PIL frames that were written.
    """
    # BUG FIX: the original returned the lazy `map` iterator, which was
    # already exhausted by the star-unpack below, so callers always got an
    # empty iterator back. Materialize the frames once and return the list.
    to_pil = T.ToPILImage()
    frames = [to_pil(frame) for frame in tensor.unbind(dim=1)]
    first_img, *rest_imgs = frames
    first_img.save(path, save_all=True, append_images=rest_imgs,
                   duration=duration, loop=loop, optimize=optimize)
    return frames


def gif_to_tensor(path, image_size, channels=3, horizontal_flip=False):
    """Load a GIF at *path* into a tensor of shape (channels, frames, h, w).

    Frames are resized, optionally randomly flipped, center-cropped to
    *image_size*, and converted to float tensors before being stacked
    along a new frame dimension (dim=1).
    """
    flip = T.RandomHorizontalFlip() if horizontal_flip else T.Lambda(identity)
    transform = T.Compose([
        T.Resize(image_size),
        flip,
        T.CenterCrop(image_size),
        T.ToTensor()
    ])

    frames = [
        transform(frame)
        for frame in seek_all_images(Image.open(path), channels=channels)
    ]
    return torch.stack(frames, dim=1)


def video_to_tensor(path, image_size=None, num_frames=-1) -> torch.Tensor:
    """Decode a video file into a float tensor of shape (c, f, h, w).

    Args:
        path: video file path (anything ``cv2.VideoCapture`` accepts).
        image_size: optional (crop_x, crop_y) pair. When given, each frame
            is center-cropped/padded by ``crop_center`` and then resized
            by torchvision. When None, frames are used at native size.
        num_frames: bound on frames kept via slicing. NOTE(review): the
            default -1 keeps all but the *last* frame (Python slice
            semantics of ``[:-1]``) — presumably intentional, confirm.

    Returns:
        A (channels, frames, height, width) float tensor in [0, 1]. On a
        decode/concat failure a random tensor of the requested size is
        returned as a best-effort placeholder (original behavior kept).
    """
    video = cv2.VideoCapture(str(path))

    # Build the per-frame transform once. Skip Resize when no target size
    # is given — the original unconditionally constructed
    # T.Resize(image_size) even with image_size=None, which cannot work.
    steps = [T.ToPILImage()]
    if image_size is not None:
        steps.append(T.Resize(image_size))
    steps.append(T.ToTensor())
    transform = T.Compose(steps)

    frames = []
    while True:
        ok, frame = video.read()
        if not ok:
            break

        # NOTE(review): cv2 decodes frames as BGR and no BGR->RGB swap is
        # performed here, so channel order follows OpenCV — confirm that
        # downstream consumers expect this.
        if image_size is not None:
            frame = crop_center(frame, *image_size)

        # h w c --> c h w, then add a leading frame axis for concatenation
        frame = transform(frame)
        frames.append(rearrange(frame, '... -> 1 ...'))

    # Release the decoder handle explicitly (previously leaked).
    video.release()

    try:
        stacked = torch.cat(frames, dim=0)
        stacked = rearrange(stacked, 'f c h w -> c f h w')
        return stacked[:, :num_frames, :, :]
    except Exception:  # narrowed from a bare except; keep best-effort fallback
        print('error path', path)
        return torch.rand(3, num_frames, *image_size)


def cast_num_frames(t, frames):
    """Force tensor *t* of shape (c, f, h, w) to exactly *frames* frames.

    Longer videos are truncated along dim 1; shorter ones are zero-padded
    at the end; an exact match is returned unchanged.
    """
    current = t.shape[1]

    if current >= frames:
        # Equal length is a no-op; otherwise truncate the tail.
        return t if current == frames else t[:, :frames]

    # F.pad's spec covers the trailing dims (w, h, f); only f grows.
    return F.pad(t, (0, 0, 0, 0, 0, frames - current))


class VideoDataset(Dataset):
    """Dataset of .gif/.mp4 videos found recursively under *folder*.

    Each item is a float tensor of shape (channels, num_frames, h, w);
    when ``force_num_frames`` is True the frame count is truncated or
    zero-padded to exactly ``num_frames``.
    """

    def __init__(self, folder, image_size, channels=3, num_frames=24, horizontal_flip=False,
                 force_num_frames=True, exts=('gif', 'mp4')):
        # NOTE: `exts` default changed from a list to an equivalent tuple —
        # it is only iterated, and a tuple avoids the mutable-default pitfall.
        super().__init__()

        self.folder = folder
        self.image_size = pair(image_size)
        self.channels = channels
        # Recursively collect every file matching one of the extensions.
        self.paths = [p for ext in exts for p in Path(f'{folder}').glob(f'**/*.{ext}')]

        # Loader callables keyed by suffix; both close over the configured size.
        self.gif_to_tensor = partial(gif_to_tensor, image_size=self.image_size, channels=self.channels)
        self.mp4_to_tensor = partial(video_to_tensor, image_size=self.image_size, num_frames=num_frames)

        self.cast_num_frames_fn = partial(cast_num_frames, frames=num_frames) if force_num_frames else identity

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, index):
        path = self.paths[index]
        ext = path.suffix

        # BUG FIX: Path.suffix includes the leading dot, so the original
        # comparison `ext == 'gif'` never matched and every .gif raised
        # ValueError('unknown extension .gif').
        if ext == '.gif':
            tensor = self.gif_to_tensor(path)
        elif ext == '.mp4':
            tensor = self.mp4_to_tensor(path)
        else:
            raise ValueError(f'unknown extension {ext}')

        return self.cast_num_frames_fn(tensor)
