import math
import random
import torch
from PIL import Image


def resize(img: Image.Image, width: int, height: int) -> Image.Image:
    """Scale *img* to fully cover (width, height), then center-crop to that size.

    The scale factor is chosen so the image covers the target box in both
    dimensions (aspect ratio preserved); the overflow is cropped equally
    from both sides.
    """
    src_w, src_h = img.size
    # Cover-fit: the larger of the two per-axis ratios guarantees both
    # dimensions reach at least the target size.
    factor = max(width / src_w, height / src_h)
    new_w = int(src_w * factor)
    new_h = int(src_h * factor)
    scaled = img.resize((new_w, new_h))
    left = (new_w - width) // 2
    top = (new_h - height) // 2
    return scaled.crop((left, top, left + width, top + height))


def convert_fps(src: torch.Tensor, src_fps: float | int, target_fps: float | int, speed=1.0) -> list[Image.Image]:
    """Resample a video tensor to *target_fps*, optionally retimed by *speed*.

    Args:
        src: frame tensor indexed as (frame, H, W, C); assumed uint8 HWC so
            ``Image.fromarray`` accepts each frame — TODO confirm with callers.
        src_fps: frame rate of ``src``.
        target_fps: desired output frame rate.
        speed: playback speed multiplier; >1 shortens the clip, <1 lengthens it.

    Returns:
        The resampled frames as PIL images. May be empty if the clip is too
        short to yield even one output frame at the requested rate/speed.
    """
    n_frames = src.shape[0]
    secs = n_frames / src_fps
    # math.floor already returns an int.
    n_output = math.floor(secs * target_fps * (1 / speed))
    # Round to the nearest source frame: .int() would truncate, biasing every
    # fractional sample position toward the earlier frame.
    idx = torch.linspace(0, n_frames - 1, n_output).round().long()
    return [Image.fromarray(f.numpy()) for f in src[idx]]


def random_clip(frames: list[Image.Image], n_frames: int) -> list[Image.Image]:
    """Pick a uniformly random contiguous window of *n_frames* frames.

    If the input already has *n_frames* frames or fewer, it is returned
    unchanged (no padding is performed).
    """
    total = len(frames)
    if total <= n_frames:
        return frames

    start = random.randint(0, total - n_frames)
    return frames[start:start + n_frames]


if __name__ == '__main__':
    from torchvision.io import read_video
    from diffusers.utils import export_to_video

    # Demo: re-time one clip at three playback speeds and export each at 16 fps.
    video, audio, info = read_video('vfi_00003.mp4', pts_unit='sec')
    for playback_speed, out_path in ((1.0, 'x1.0.mp4'), (1.5, 'x1.5.mp4'), (0.5, 'x0.5.mp4')):
        export_to_video(convert_fps(video, info['video_fps'], 16, playback_speed), out_path, 16)

