import mindspore as ms
import cv2
import math
import numpy as np
import random

from mindspore.ops import operations as P
from mindformers.models.blip2.blip2_processor import Blip2ImageProcessor

# NOTE(review): commented-out device override kept for local debugging — confirm before removing.
# ms.set_context(device_target='CPU')
# Recognized video container extensions.
# NOTE(review): not referenced anywhere in this file — presumably consumed by callers; verify before removal.
video_format_list = ('.mp4', '.avi', '.flv', '.mpeg', '.f4v', '.mkv')


class VideoChat2VideoProcessor:
    """Sample frames from a video file and turn them into a model-ready tensor.

    Frames are sampled uniformly (``'middle'`` strategy), preprocessed with
    ``Blip2ImageProcessor`` and reshaped to
    ``(channel, num_frames, image_size, image_size)``.
    """

    def __init__(self, num_frames=1, channel=3, image_size=224):
        # num_frames: how many frames to sample from each video.
        # channel: number of image channels expected downstream.
        # image_size: square side length frames are resized to.
        self.num_frames = num_frames
        self.channel = channel
        self.image_size = image_size

    def __call__(self, video_file_path):
        """Process the video at ``video_file_path``; None when it is empty/unreadable."""
        return self.cv_video_to_image(video_file_path)

    def cv_video_to_image(self, video_path):
        """Read ``video_path``, sample ``self.num_frames`` frames, return the tensor.

        Returns:
            The reshaped processed tensor, or ``None`` when the video has no
            readable frames.
        """
        cap = cv2.VideoCapture(video_path)
        try:
            frame_nums = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = int(cap.get(cv2.CAP_PROP_FPS))
            # Empty or unreadable video: nothing to sample. (This also covers
            # the original `frame_step == 0` check, since ceil(0 / n) == 0.)
            if frame_nums <= 0:
                return None
            frame_indices = self.get_frame_indices(
                self.num_frames, frame_nums, sample='middle', input_fps=fps)
            frames_list = []
            for idx in frame_indices:
                # Seek to the requested frame (CAP_PROP_POS_FRAMES == 1; was a magic number).
                cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
                success, image = cap.read()
                if success:
                    frames_list.append(image)
        finally:
            # Always release the capture handle — the original leaked it on
            # the early-return path.
            cap.release()
        processor = Blip2ImageProcessor(image_size=self.image_size)
        output = processor(np.array(frames_list))
        output = P.Reshape()(output, (self.channel, self.num_frames, self.image_size, self.image_size))
        return output

    def get_frame_indices(self, num_frames, vlen, sample='rand', fix_start=None, input_fps=1, max_num_frames=-1):
        """Compute which frame indices to sample from a video of ``vlen`` frames.

        Args:
            num_frames: number of indices requested (interval-based strategies).
            vlen: total number of frames in the video.
            sample: ``'rand'`` (random index per uniform interval), ``'middle'``
                (midpoint of each interval), or ``'fpsX'`` (e.g. ``'fps0.5'``:
                sequential sampling at X frames per second).
            fix_start: with interval strategies, take this fixed offset into
                each interval instead (takes precedence over ``'middle'``).
            input_fps: source video frame rate, used by the ``'fps'`` strategy.
            max_num_frames: cap on the result length for ``'fps'`` sampling
                (``-1`` means no cap).

        Returns:
            A list of frame indices (length ``num_frames`` for interval
            strategies, padded with the last index if the video is short).

        Raises:
            ValueError: for an unrecognized ``sample`` strategy.
        """
        if sample in ["rand", "middle"]:  # uniform interval sampling
            acc_samples = min(num_frames, vlen)
            # Split [0, vlen) into acc_samples intervals; sample one index per interval.
            intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
            ranges = [(intervals[i], intervals[i + 1] - 1) for i in range(acc_samples)]
            if sample == 'rand':
                try:
                    frame_indices = [random.choice(range(lo, hi)) for lo, hi in ranges]
                except IndexError:
                    # An interval was empty (lo == hi) — random.choice raised.
                    # Fall back to a sorted random subset of all frames.
                    # (The original used a bare `except:`, hiding real errors.)
                    frame_indices = np.random.permutation(vlen)[:acc_samples]
                    frame_indices.sort()
                    frame_indices = list(frame_indices)
            elif fix_start is not None:
                frame_indices = [lo + fix_start for lo, _ in ranges]
            elif sample == 'middle':
                frame_indices = [(lo + hi) // 2 for lo, hi in ranges]
            else:
                raise NotImplementedError(f"unsupported sample strategy: {sample}")

            if not frame_indices:
                # vlen == 0: nothing to sample (original crashed with IndexError here).
                return []
            if len(frame_indices) < num_frames:
                # Video shorter than requested: pad with the last sampled index.
                padded_frame_indices = [frame_indices[-1]] * num_frames
                padded_frame_indices[:len(frame_indices)] = frame_indices
                frame_indices = padded_frame_indices
        elif "fps" in sample:  # e.g. 'fps0.5': sequential sampling at 0.5 fps
            output_fps = float(sample[3:])
            duration = float(vlen) / input_fps
            delta = 1 / output_fps  # seconds between samples (clip length each frame represents)
            frame_seconds = np.arange(0 + delta / 2, duration + delta / 2, delta)
            frame_indices = np.around(frame_seconds * input_fps).astype(int)
            frame_indices = [e for e in frame_indices if e < vlen]
            if 0 < max_num_frames < len(frame_indices):
                frame_indices = frame_indices[:max_num_frames]
        else:
            raise ValueError(f"unknown sample strategy: {sample}")
        return frame_indices


def main():
    """CLI smoke test: process a single video file and print the resulting tensor."""
    import sys

    # Accept an optional path argument; keep the original sample path as the default.
    path = sys.argv[1] if len(sys.argv) > 1 else \
        '/home/zhangyouwen/work/data/mobile_video_data/video_train/video_00000.mp4'
    processor = VideoChat2VideoProcessor()
    output = processor(path)
    print(output)


if __name__ == "__main__":
    main()
