import os.path
import random
import cv2
import moviepy.editor as mpy
from moviepy.video.VideoClip import ImageClip
from moviepy.video.compositing.concatenate import concatenate

from settings import PIC_IN_PATH
from job.generate_image_embed import ImageEmbedProduce
from job.generate_video_embed import VideoProduce

class GenerateVideo:
    """Assemble silent videos from still pictures or existing video clips."""

    @staticmethod
    def add_random_transition(clip1, clip2):
        """Join two clips with a randomly chosen transition effect.

        Currently only a fade-out/fade-in transition is enabled; the
        commented alternatives can be re-enabled to widen the pool.
        """
        transitions = [
            # lambda c1, c2: concatenate([c1, c2.set_start(c2.duration)]),  # direct cut
            lambda c1, c2: concatenate([c1.fadeout(0.5), c2.fadein(0.5)]),  # fade out / fade in
            # lambda c1, c2: concatenate([c1.crossfadein(0.5), c2.crossfadeout(0.5)]),  # crossfade
            # add more transition functions here
        ]
        transition_func = random.choice(transitions)
        return transition_func(clip1, clip2)

    def get_duration(self, video_sec, img_num):
        """
        Given the target video length and a picture count, return the
        approximate display duration (seconds) of each picture.

        With N pictures there are N-1 transitions; each transition overlaps
        roughly one picture slot, hence the 2*N - 1 divisor.
        Returns 0 when there are no pictures.
        """
        if img_num == 0:
            return 0
        return video_sec / (2 * img_num - 1)

    def gen_mute_video_by_pictures(self, out_path, size, video_sec=None, fps=24):
        """Build a silent video from every picture found in PIC_IN_PATH."""
        # os.listdir yields bare file names; join them with the directory so
        # generate_video receives paths that actually exist on disk (otherwise
        # its existence check replaces every frame with a random picture).
        img_path_arr = [os.path.join(PIC_IN_PATH, name) for name in os.listdir(PIC_IN_PATH)]
        picture_num = len(img_path_arr)
        if picture_num == 0:
            return False, "图片数量为空，无法生成视频"
        # Compute the per-picture duration only after the empty check.
        duration = self.get_duration(video_sec, picture_num)
        self.generate_video(duration, fps, img_path_arr, out_path, size)

    # def gen_mute_video_by_gpt(self, out_path, size, labels, generic_path, video_sec=None, fps=24):
    #     image_produce = ImageProduce()
    #     img_path_arr = image_produce.get_image_path(labels, video_sec, generic_path)
    #     picture_num = len(img_path_arr)
    #     duration = self.get_duration(video_sec, picture_num)
    #     if picture_num == 0:
    #         return False, "图片数量为空，无法生成视频"
    #     self.generate_video(duration, fps, img_path_arr, out_path, size)

    def gen_mute_image_video_embedding(self, out_path, size, embedding, video_sec=None, fps=24):
        """Build a silent video from pictures selected by an embedding lookup."""
        image_embed_produce = ImageEmbedProduce()
        img_path_arr = image_embed_produce.run(video_sec, embedding)
        picture_num = len(img_path_arr)
        if picture_num == 0:
            return False, "图片数量为空，无法生成视频"
        duration = self.get_duration(video_sec, picture_num)
        self.generate_video(duration, fps, img_path_arr, out_path, size)

    def gen_mute_video_video_embedding(self, out_path, embedding, video_sec=None, fps=24):
        """Build a silent video by concatenating clips selected by an embedding lookup."""
        video_embed_produce = VideoProduce()
        video_data = video_embed_produce.run(video_sec, embedding)
        if video_data is None:
            return False, "视频数量为空，无法生成视频"
        # video_data layout (per VideoProduce.run): (clip paths, per-clip
        # durations, total duration) — the total is not needed here.
        video_path_arr = video_data[0]
        video_time_list = video_data[1]
        if len(video_path_arr) == 0:
            return False, "图片数量为空，无法生成视频"
        self.new_generate_video(video_path_arr, video_time_list, out_path, fps)

    def new_generate_video(self, video_path_arr, video_time_list, out_path, fps=24):
        """Concatenate source videos, trimming each to its allotted duration.

        video_time_list holds the desired duration (seconds) for each clip,
        aligned index-for-index with video_path_arr.
        """
        video_clips = []
        for video_path, clip_duration in zip(video_path_arr, video_time_list):
            clip = mpy.VideoFileClip(video_path).set_duration(clip_duration)
            video_clips.append(clip)
        # Use the module-level `concatenate` import; `mpy.concatenate` is not
        # exported by recent moviepy.editor versions.
        final_clip = concatenate(video_clips)
        final_clip.write_videofile(out_path, fps=fps, codec='libx264')  # , remove_temp=True

    def generate_video(self, duration, fps, img_path_arr, out_path, size):
        """Render the given pictures into a video with random transitions.

        Each picture is shown for `duration` seconds; missing files are
        replaced with a random picture from PIC_IN_PATH as a best-effort
        fallback.
        """
        img_array = []
        for img_path in img_path_arr:
            if not os.path.exists(img_path):
                print(img_path, "not exists")
                samples = os.listdir(PIC_IN_PATH)
                img_path = os.path.join(PIC_IN_PATH, random.choice(samples))
            image = cv2.imread(img_path)
            image = cv2.resize(image, size)
            # OpenCV decodes to BGR; moviepy expects RGB.
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            video_clip = ImageClip(image).set_duration(duration)
            img_array.append(video_clip)
        transitions = [self.add_random_transition(clip1, clip2) for clip1, clip2 in zip(img_array, img_array[1:])]
        clip = concatenate([img_array[0]] + transitions)
        # NOTE: write_videofile takes no `format` parameter (the container is
        # inferred from out_path's extension); passing one raises TypeError.
        clip.write_videofile(out_path, fps=fps, codec='libx264', audio_bitrate="3000k", write_logfile=True)
        # threads=5, codec="h264_nvenc" are possible alternatives for GPU encoding
