import os
import random
import re
import shutil
import warnings
from argparse import ArgumentParser
from contextlib import ExitStack
from glob import glob
from os import path

import torch
from diffusers.utils import export_to_video
from PIL import Image
from torchvision import io
from tqdm import tqdm

import utils
from llm_tools import LLMTools
from utils import resize

predictor: LLMTools | None = None


def video_understand(video: str) -> str:
    """Return the LLM-generated caption for a single video file path."""
    captions = predictor.video_understand([video])
    return captions[0]


def prompt_simplify(inputs: str) -> str:
    """Ask the LLM helper to condense *inputs* into a shorter prompt."""
    simplified = predictor.simplify(inputs)
    return simplified


def proc_video(
        output: str,
        video: torch.Tensor,
        info: dict,
        target_fps: int | float,
        speed: float,
        target_frames: int = 0,
        min_frames: int = 0,
        size: tuple[int, int] | None = None,
) -> tuple[bool, list[Image.Image] | None]:
    """Resample a decoded video to *target_fps*, optionally clip/resize it,
    and export it to *output*.

    Returns ``(True, frames)`` on success, or ``(False, None)`` when the
    resampled video is shorter than *min_frames* or *target_frames*.
    """
    resampled = utils.convert_fps(video, info['video_fps'], target_fps, speed)
    count = len(resampled)
    # Reject clips that end up too short after fps conversion.
    if count < min_frames or (0 < target_frames and count < target_frames):
        return False, None

    if target_frames > 0:
        resampled = utils.random_clip(resampled, target_frames)

    if size is not None:
        width, height = size
        resampled = [resize(frame, width, height) for frame in resampled]

    export_to_video(resampled, output, target_fps)
    return True, resampled


def main():
    """CLI entry point: resample, resize, and caption a directory of videos.

    Output layout under ``--output``:
      videos/      resampled (and speed-augmented) clips
      images/      first frame of each clip (with --capture-first-frame)
      videos.txt / images.txt / prompt.txt  index files, one line per clip
      log.txt      per-clip processing details
    """
    os.environ['TOKENIZERS_PARALLELISM'] = 'true'
    os.environ['HF_HUB_OFFLINE'] = '1'

    parser = ArgumentParser()
    parser.add_argument('--dir', '-r', default='videos', help='videos directory')
    parser.add_argument('--output', '-o', default='output', help='output directory')
    parser.add_argument('--width', type=int, help='video width', default=720)
    parser.add_argument('--height', type=int, help='video height', default=1280)
    parser.add_argument('--capture-first-frame', '-c', action='store_true', default=False)
    parser.add_argument('--understand', '-u', action='store_true', default=False)
    # NOTE(review): store_false means passing --quant-8bit DISABLES 8-bit
    # quantization; the flag name reads like the opposite. Kept unchanged for
    # CLI compatibility -- confirm this is intended.
    parser.add_argument('--quant-8bit', action='store_false', default=True)
    parser.add_argument('--min-frames', default=1, type=int, help='min frames per video')
    parser.add_argument('--target-frames', '-t', default=-1, type=int, help='target frames')
    parser.add_argument('--force', '-f', action='store_true', default=False)
    parser.add_argument('--max-num', type=int, default=-1)
    parser.add_argument('--fps', type=int, default=16)
    parser.add_argument('--ignore-speed', action='store_true', default=False)
    parser.add_argument('--lora', type=str, default=None)
    parser.add_argument('--model-path', type=str, default='Qwen/Qwen2.5-VL-7B-Instruct')
    parser.add_argument('--enhance-speed', action='store_true', default=False)
    parser.add_argument('--simplify-prompt', '-s', action='store_true', default=False)
    parser.add_argument('--prompt', '-p', type=str, default='')

    args = parser.parse_args()
    print(args)

    if args.understand:
        # Load the captioning model only when captions are requested.
        global predictor
        predictor = LLMTools(
            args.model_path,
            load_in_8bit=args.quant_8bit,
            adapter=args.lora,
            use_system_prompt=True
        )

    # Clear a pre-existing output dir with --force or interactive confirmation;
    # otherwise refuse to overwrite it.
    if path.exists(args.output) and (args.force or input('clear output? (y/n)').lower() == 'y'):
        shutil.rmtree(args.output)
    elif path.exists(args.output):
        raise ValueError('output file already exists')

    os.makedirs(path.join(args.output, 'videos'), exist_ok=True)
    os.makedirs(path.join(args.output, 'images'), exist_ok=True)

    videos = glob(path.join(args.dir, '*'))
    if 0 < args.max_num < len(videos):
        # Randomly subsample when the directory holds more than --max-num videos.
        random.shuffle(videos)
        videos = videos[:args.max_num]

    # ExitStack guarantees the index/log files are flushed and closed even if
    # processing raises part-way through (the originals were never closed).
    with ExitStack() as stack:
        videos_idx_fp = stack.enter_context(open(path.join(args.output, 'videos.txt'), 'wt'))
        prompts_fp = stack.enter_context(open(path.join(args.output, 'prompt.txt'), 'wt'))
        images_idx_fp = (
            stack.enter_context(open(path.join(args.output, 'images.txt'), 'wt'))
            if args.capture_first_frame else None
        )
        logs_fp = stack.enter_context(open(path.join(args.output, 'log.txt'), 'wt'))

        t_bar = tqdm(videos)
        for fp in t_bar:
            if not os.path.isfile(fp):
                continue

            frames_tensor, _audio, info = io.read_video(fp, pts_unit='sec')
            bs_speed = 1.0
            if not args.ignore_speed:
                # A base playback speed may be embedded in the filename as e.g. "[1.5]".
                # re.findall always returns a list, so a truthiness check suffices.
                m = re.findall(r'\[(.+)]', fp)
                if m:
                    bs_speed = float(m[0])
                # Sample frames per second of wall time so the resampled video
                # plays at the same speed as the original.
                if args.fps > info['video_fps']:
                    warnings.warn(f'{fp}: video fps < target fps, skip')
                    continue

                t_bar.set_postfix({
                    'video': fp,
                    'original fps': info['video_fps'],
                    'target fps': args.fps,
                    'base speed': bs_speed,
                })

            old_size = frames_tensor.shape[1:3]
            # resize to training size
            basename = path.basename(fp)
            namewithoutsuffix, ext = path.splitext(basename)
            saved_video = path.join(args.output, 'videos', basename)

            res, frames = proc_video(
                saved_video,
                frames_tensor,
                info,
                args.fps,
                bs_speed,
                args.target_frames,
                args.min_frames,
                (args.width, args.height)
            )
            if not res:
                warnings.warn(f'{fp}: skipped')
                continue
            logs_fp.write(f'{basename} frames: {len(frames)}, ({old_size[1]},{old_size[0]}) -> {frames[0].size}\n')
            videos_idx_fp.write(f'videos/{basename}\n')

            # capture first frame
            if images_idx_fp is not None:
                images_idx_fp.write(f'images/{namewithoutsuffix}.jpg\n')
                frames[0].save(path.join(args.output, 'images', f'{namewithoutsuffix}.jpg'))

            prompt = args.prompt
            if args.understand:
                prompt = video_understand(saved_video)
                if args.simplify_prompt:
                    prompt = prompt_simplify(prompt)
                prompt = prompt.replace('\n', ' ')

            prompts_fp.write(f'{prompt}\n' if not args.enhance_speed else f'play rate x1.0, {prompt}\n')

            if args.enhance_speed:
                # Re-export the clip at extra playback rates as speed augmentation.
                enh_rates = [0.5, 0.75, 1.25, 1.5]

                for enh_rate in enh_rates:
                    # Slow-motion variants need enough source fps to resample from.
                    if enh_rate < 1.0 and info['video_fps'] * enh_rate < args.fps:
                        continue

                    suffix = f'x{enh_rate}'
                    e_out = path.join(args.output, 'videos', f'{namewithoutsuffix}_{suffix}{ext}')
                    res, s_frames = proc_video(
                        e_out,
                        frames_tensor,
                        info,
                        args.fps,
                        bs_speed * enh_rate,
                        args.target_frames,
                        args.min_frames,
                        (args.width, args.height)
                    )

                    if res:
                        # Bug fix: this log entry previously lacked a trailing
                        # newline, running successive entries together.
                        logs_fp.write('enhance speed, slow motion x{} generated: {}\n'.format(enh_rate, e_out))
                        videos_idx_fp.write(f'videos/{namewithoutsuffix}_{suffix}{ext}\n')

                        if images_idx_fp is not None:
                            images_idx_fp.write(f'images/{namewithoutsuffix}_{suffix}.jpg\n')
                            s_frames[0].save(path.join(args.output, 'images', f'{namewithoutsuffix}_{suffix}.jpg'))

                        prompts_fp.write(f'play rate x{enh_rate}, {prompt}\n')


# Standard script guard: run the pipeline only when executed directly.
if __name__ == '__main__':
    main()
