import os
import cv2
import tqdm
import argparse
import numpy as np

from imageio import mimsave

import torch
import config as cfg
from Trainer import Model
from benchmark.utils.padder import InputPadder

def video_interpolation(image_seq, model, ratio, tta=None):
    """Interpolate ``ratio - 1`` intermediate frames between every consecutive pair.

    Args:
        image_seq: uint8 array of frames; indexed as (frame, H, W, C) given the
            transpose below — produced by stacking cv2 BGR frames (TODO confirm
            with caller).
        model: project model exposing ``multi_inference`` (see Trainer.Model).
        ratio: frame-rate multiplier; e.g. 2 inserts one frame per pair.
        tta: enable test-time augmentation. Defaults to the module-global ``TTA``
            set in ``__main__`` (kept for backward compatibility — the original
            code read the global directly, which raised NameError when this
            function was imported standalone).

    Returns:
        List of uint8 HxWxC frames: originals with interpolated frames in between.
        Empty list for an empty input sequence.
    """
    if tta is None:
        tta = TTA  # fall back to the flag configured by the __main__ block
    if len(image_seq) == 0:
        return []

    interpolated = []
    # (N, H, W, C) uint8 -> (N, C, H, W) float in [0, 1]
    frames = torch.tensor(image_seq.transpose(0, 3, 1, 2)) / 255.
    interpolated.append(image_seq[0])

    # Evenly spaced timesteps in (0, 1); loop-invariant, so computed once.
    timesteps = [(i + 1) * (1. / ratio) for i in range(ratio - 1)]

    # no_grad: pure inference — avoids building autograd graphs on the GPU.
    with torch.no_grad():
        for idx in tqdm.tqdm(range(1, len(frames))):
            frame_prev = frames[idx - 1].unsqueeze(0).cuda()
            frame_next = frames[idx].unsqueeze(0).cuda()
            # Pad both frames to a multiple of 32 as the network requires.
            padder = InputPadder(frame_prev.shape, divisor=32)
            frame_prev, frame_next = padder.pad(frame_prev, frame_next)
            preds = model.multi_inference(frame_prev, frame_next, TTA=tta,
                                          time_list=timesteps,
                                          fast_TTA=tta)
            for pred in preds:
                interpolated.append(
                    (padder.unpad(pred).detach().cpu().numpy().transpose(1, 2, 0) * 255.0).astype(np.uint8))
            interpolated.append(image_seq[idx])
    return interpolated

def save_video(output_path, image_seq, fps):
    """Encode a sequence of HxWxC frames into an mp4 file at the given fps."""
    height, width = image_seq[0].shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for frame in image_seq:
        writer.write(frame)
    writer.release()


if __name__ == '__main__':
    # CLI driver: read a video, interpolate frames, write the result.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default='./ckpt/ours_t.pkl', type=str)
    # Fail fast with a clear argparse error instead of an opaque cv2 crash later.
    parser.add_argument('--video_path', type=str, required=True)
    parser.add_argument('--output_path', type=str, required=True)
    parser.add_argument('--ratio', default=2, type=int)
    args = parser.parse_args()

    TTA = True
    # NOTE(review): this compares the model *path* against a model *name*;
    # the small model is selected only by passing --model_path ours_small_t
    # verbatim (upstream convention) — confirm this is intended.
    if args.model_path == 'ours_small_t':
        TTA = False
        cfg.MODEL_CONFIG['LOGNAME'] = 'ours_small_t'
        cfg.MODEL_CONFIG['MODEL_ARCH'] = cfg.init_model_config(
            F=16,
            depth=[2, 2, 2, 2, 2]
        )
    else:
        cfg.MODEL_CONFIG['LOGNAME'] = 'ours_t'
        cfg.MODEL_CONFIG['MODEL_ARCH'] = cfg.init_model_config(
            F=32,
            depth=[2, 2, 2, 4, 4]
        )
    model = Model(-1)
    model.load_model()
    model.eval()
    model.device()

    video_reader = cv2.VideoCapture(args.video_path)
    # VideoCapture does not raise on a bad path; check explicitly so the user
    # gets a real error instead of an empty output video with fps == 0.
    if not video_reader.isOpened():
        raise IOError('cannot open video: {}'.format(args.video_path))
    fps = video_reader.get(cv2.CAP_PROP_FPS)
    image_list = []

    while True:
        ret, frame = video_reader.read()
        if not ret:
            break
        image_list.append(frame)
    video_reader.release()  # release the capture handle (was leaked before)

    image_seq = np.array(image_list)
    image_seq_interpolated = video_interpolation(image_seq, model, args.ratio)
    save_video(args.output_path, image_seq_interpolated, fps)






