from moviepy import *

from PIL import ImageColor, ImageFont
import cv2
import uuid
import tempfile
import logging
import numpy as np

from VideoHandler.effect.FadeInAndOut import FadeInAndOut
from VideoHandler.utils.file_utils import delete_file, download_file
from VideoHandler.utils.function_utils import slide_in_bounce
logger = logging.getLogger('VideoHandler')

# Gaussian-blur one video frame, then crop out a region.
def apply_gaussian_blur(frame, blur_size, x, y, width, height):
    """Blur an RGB *frame* with a (blur_size x blur_size) Gaussian kernel
    and return the crop at (x, y) of size (width, height).

    OpenCV operates on BGR while MoviePy delivers RGB, so the frame is
    converted before blurring and converted back afterwards.
    """
    bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    blurred = cv2.GaussianBlur(bgr, (blur_size, blur_size), 0)
    rgb = cv2.cvtColor(blurred, cv2.COLOR_BGR2RGB)
    return rgb[y:y + height, x:x + width]

def video_extend(video_clip, duration):
    """Extend *video_clip* by *duration* seconds using a freeze frame.

    The frozen image is sampled one second before the end of the clip.
    """
    frozen = ImageClip(video_clip.get_frame(video_clip.duration - 1))
    frozen = frozen.with_duration(duration)
    return concatenate_videoclips((video_clip, frozen))


# Resize + blur a video to build a background layer at the target size.
def video_size_clip(path, width, height):
    """Produce a heavily blurred background video of exactly (width, height).

    The source at *path* is scaled so the target frame is fully covered,
    centre-cropped (offsets snapped to even pixels), Gaussian blurred and
    written to a temporary .mp4 whose path is returned. The caller is
    responsible for deleting the returned file.
    """
    cap = cv2.VideoCapture(path)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)
    clip_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    clip_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    background_video_path = tempfile.gettempdir() + "/" + str(uuid.uuid4()) + ".mp4"
    max_size = max(width, height)
    out = cv2.VideoWriter(background_video_path, fourcc, fps, (width, height))
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            if (clip_width / clip_height) > (width / height):
                # Source is wider than the target: scale to target height,
                # then crop horizontally around the (even-aligned) centre.
                resize_width = clip_width * max_size // clip_height // 2 * 2
                cropped_frame = cv2.resize(frame, (resize_width, max_size))
                # "// 4 * 2" halves the surplus and snaps it to an even pixel.
                position_x = int((clip_width * max_size // clip_height) - width) // 4 * 2
                cropped_frame = cropped_frame[0:height, position_x:position_x + width]
            else:
                # Source is taller: scale to target width, crop vertically.
                resize_height = clip_height * max_size // clip_width // 2 * 2
                cropped_frame = cv2.resize(frame, (max_size, resize_height))
                position_y = int((clip_height * max_size // clip_width) - height) // 4 * 2
                cropped_frame = cropped_frame[position_y:position_y + height, 0:width]
            # Very strong blur so the layer reads as a soft backdrop.
            out.write(cv2.GaussianBlur(cropped_frame, (149, 149), 0, 0))
    finally:
        # Fix: the original leaked cap/out on a mid-loop exception, and called
        # cv2.waitKey(1) + destroyAllWindows() per frame — headless-useless
        # HighGUI calls (the 'q' break could never fire without a window).
        cap.release()
        out.release()
    return background_video_path
def hex_to_rgb(hex_code):
    """Convert a hex color code (e.g. "#ff0000") to an (R, G, B) tuple using Pillow."""
    return ImageColor.getrgb(hex_code)

# Image(blurred)-video fusion: blend a clip over a blurred still of itself.
def video_mix(video_clip, width, height):
    """Return [background, foreground] clips filling a (width, height) frame.

    A frame grabbed at t=1s is blurred and cropped to serve as the
    background; the original clip is scaled to fit the frame and centred
    on top of it.
    """
    # Grab a single frame from the video to build the background from.
    bg_img_clip = ImageClip(video_clip.get_frame(1))
    max_size = max(width, height)
    final_duration = video_clip.duration
    # Case 1: source aspect ratio wider than the target frame.
    if (bg_img_clip.w / bg_img_clip.h) > (width / height):
        # Blurred bars above/below the video.
        bg_img_clip = bg_img_clip.resized(height=max_size)
        # Fix: capture the crop offset eagerly. The original lambda read
        # bg_img_clip.w at render time, after bg_img_clip had been rebound
        # to the transformed clip — a fragile late-binding closure.
        offset_x = (bg_img_clip.w - width) // 2
        bg_img_clip = bg_img_clip.image_transform(
            lambda frame, _x=offset_x: apply_gaussian_blur(frame, 99, _x, 0, width, height))
        # Stretch the video to the target width and centre it vertically.
        video_clip = video_clip.resized(width=width)
        video_clip = video_clip.with_position((0, (height - video_clip.h) // 2))
    else:
        # Blurred bars left/right of the video.
        bg_img_clip = bg_img_clip.resized(width=max_size)
        offset_y = (bg_img_clip.h - height) // 2
        bg_img_clip = bg_img_clip.image_transform(
            lambda frame, _y=offset_y: apply_gaussian_blur(frame, 99, 0, _y, width, height))
        # Stretch the video to the target height and centre it horizontally.
        video_clip = video_clip.resized(height=height)
        video_clip = video_clip.with_position(((width - video_clip.w) // 2, 0))
    return [bg_img_clip.with_duration(final_duration), video_clip]

# Video(blurred)-video fusion: blend a clip over a separate background video.
def video_mix_double(video_clip, background_clip, width, height, clip_position):
    """Return [background, foreground] clips for a (width, height) frame.

    The foreground is scaled to fit; its offset comes from *clip_position*
    ({'positionX': ..., 'positionY': ...}) when provided, otherwise it is
    centred. The background is freeze-extended or trimmed so both clips
    share the foreground's duration.
    """
    position_x = None
    position_y = None
    if clip_position is not None:
        position_x = clip_position.get('positionX')
        # BUG FIX: the original read 'positionX' for the Y offset as well.
        position_y = clip_position.get('positionY')
    if (video_clip.w / video_clip.h) > (width / height):
        # Wider than the target: fill the width, position vertically.
        video_clip = video_clip.resized(width=width)
        if position_y is not None:
            video_clip = video_clip.with_position((0, position_y))
        else:
            video_clip = video_clip.with_position((0, (height - video_clip.h) // 2))
    else:
        # Taller than the target: fill the height, position horizontally.
        video_clip = video_clip.resized(height=height)
        if position_x is not None:
            video_clip = video_clip.with_position((position_x, 0))
        else:
            video_clip = video_clip.with_position(((width - video_clip.w) // 2, 0))
    if video_clip.duration > background_clip.duration:
        # Pad the background with a freeze frame to match the foreground.
        background_clip = video_extend(background_clip, video_clip.duration - background_clip.duration)
    else:
        # Trim the background to the foreground's duration.
        background_clip = background_clip.subclipped(0, video_clip.duration)
    return [background_clip, video_clip]

def get_actual_text_width(text, font_path, font_size):
    """Return the rendered pixel width of *text* at *font_size* for *font_path*."""
    loaded_font = ImageFont.truetype(font_path, font_size)
    return int(loaded_font.getlength(text))

def generate_text(mark_item, start, duration):
    """Build the clip(s) for one text mark.

    Returns a list containing the TextClip and, when mark_item['hyphen']
    equals 1, a thin ColorClip strike-through bar laid across the text.
    """
    final_clip_list = []
    content = mark_item['content']
    position_x = mark_item['positionX']
    position_y = mark_item['positionY']
    width = mark_item['width']
    height = mark_item['height']
    font_size = mark_item['fontSize']
    font_color = '#' + mark_item['fontColor']
    font_weight = mark_item['fontWeight']
    align_mode = mark_item['alignMode']
    # Font selection: FZLanTing when requested, otherwise Microsoft YaHei
    # (bold variant when fontWeight == 1). The original's extra
    # "is not None" guard was redundant: None == 'fzlt' is already False.
    if mark_item['fontName'] == 'fzlt':
        font = '/app/VideoHandler/fonts/FZLanTing.TTF'
    elif font_weight == 1:
        font = '/app/VideoHandler/fonts/msyhbd.ttc'
    else:
        font = '/app/VideoHandler/fonts/msyh.ttc'
    # alignMode: 0 = left, 1 = center, anything else = right.
    horizontal_align = {0: 'left', 1: 'center'}.get(align_mode, 'right')
    text_clip = TextClip(text=content, size=(width, height), font_size=font_size,
                         color=font_color, horizontal_align=horizontal_align,
                         font=font, transparent=True
                         ).with_start(start).with_duration(duration).with_position((position_x, position_y))
    final_clip_list.append(text_clip)
    if mark_item.get('hyphen') == 1:
        # Strike-through bar: roughly the text width minus one glyph,
        # shifted according to the horizontal alignment.
        content_width = get_actual_text_width(content, font, font_size)
        hyphen_length = content_width - font_size
        margin_left = 0
        if align_mode == 1:
            margin_left = (width - content_width) // 2
        elif align_mode == 2:
            margin_left = width - content_width
        strike_clip = ColorClip(size=(hyphen_length, text_clip.h // 12),
                                color=hex_to_rgb(font_color)
                                ).with_position((font_size + position_x + margin_left,
                                                 (text_clip.h // 2) + position_y)
                                ).with_start(start).with_duration(duration)
        final_clip_list.append(strike_clip)
    return final_clip_list

def attach_overlay(clip_item, overlay, width, height):
    """Compose *clip_item* with an overlay template, text marks and logos.

    Returns [base_clip, decorate_clip], where decorate_clip carries the
    template frame, text marks and image marks, optionally faded in/out.
    """
    start = 0
    duration = clip_item.duration
    if 'effect' in overlay:
        effect = overlay['effect']
        duration = effect.get('duration', duration)
        start = effect.get('start', start)

    origin_clip_list = []
    if 'template' in overlay:
        template = overlay['template']
        position_x = template['positionX']
        position_y = template['positionY']
        position_width = template['width']
        position_height = template['height']
        src = template['src']
        # Scale the clip so it covers the template slot, then centre-crop.
        if (clip_item.w / clip_item.h) < (position_width / position_height):
            clip_item = clip_item.resized(width=position_width)
            # BUG FIX: x2 was position_height in the original, truncating the
            # crop width whenever the slot is not square (compare the
            # symmetric branch below, which correctly uses position_height
            # for y2).
            clip_item = clip_item.cropped(
                x1=0, y1=(clip_item.h - position_height) // 2,
                x2=position_width, y2=(clip_item.h + position_height) // 2
            ).with_position((position_x, position_y))
        else:
            clip_item = clip_item.resized(height=position_height)
            clip_item = clip_item.cropped(
                x1=(clip_item.w - position_width) // 2, y1=0,
                x2=(clip_item.w + position_width) // 2, y2=position_height
            ).with_position((position_x, position_y))
        template_clip = ImageClip(src).with_start(start).with_duration(duration)

        # NOTE(review): with_opacity(1) is fully opaque despite the variable
        # name — this is a solid black canvas behind the cropped clip;
        # confirm that is the intent before renaming.
        transparent_clip = ColorClip(size=(width, height), color=(0, 0, 0), duration=duration).with_opacity(1)

        clip_item = CompositeVideoClip((transparent_clip, clip_item))
        origin_clip_list.append(template_clip)
    else:
        clip_item = clip_item.resized((width, height))

    if 'marks' in overlay:
        for mark_item in overlay['marks']:
            if mark_item['placementType'] != 5:
                # Text mark (with optional strike-through).
                origin_clip_list += generate_text(mark_item, start, duration)
            else:
                # placementType 5: an image/logo mark whose src is in 'content'.
                logo_clip = ImageClip(mark_item['content']) \
                    .with_start(start).with_duration(duration) \
                    .resized((mark_item['width'], mark_item['height'])) \
                    .with_position((mark_item['positionX'], mark_item['positionY']))
                origin_clip_list.append(logo_clip)

    decorate_clip = CompositeVideoClip(origin_clip_list)

    if 'effect' in overlay:
        effect_info = overlay['effect']
        if effect_info['name'] == 'fadeInAndOut':
            # Fade the decoration layer in and out via its mask.
            decorate_mask = FadeInAndOut(0.5, duration, start).apply(decorate_clip.mask)
            decorate_clip = decorate_clip.with_mask(decorate_mask)
    return [clip_item, decorate_clip]

def integrate_clip_item(clip_info_item, width, height, clip_position, close_clip_list):
    """Assemble one storyboard item into a CompositeVideoClip.

    Handles video (type None/2) and image clips, the mixType blend modes,
    looping GIF key points, and logo/tip image overlays. Temp files and
    clips that must be released later are appended to *close_clip_list*
    as {'path': ..., 'clip': ...} records.
    """
    final_clip_list = []
    clip = clip_info_item['clip']
    clip_type = clip['type']
    mix_type = clip.get('mixType', 0)
    clip_item = None

    if clip_type is None or clip_type == 2:
        # Video clip: download to a local temp file first.
        clip_src = download_file(clip['src'], ".mp4")
        clip_item = VideoFileClip(clip_src)
        # Register the download and the open clip for cleanup.
        close_clip_list.append({'path': clip_src, 'clip': clip_item})
        if mix_type == 1:
            # Background is a blurred still taken from the clip itself.
            final_clip_list += video_mix(clip_item, width, height)
        elif mix_type == 2:
            # Background is a blurred video generated from the same source.
            background_video_path = video_size_clip(clip_src, width, height)
            background_clip = VideoFileClip(background_video_path)
            final_clip_list += video_mix_double(clip_item, background_clip, width, height, clip_position)
            close_clip_list.append({'path': background_video_path, 'clip': background_clip})
        elif mix_type == 3:
            # Product-overlay composition (template + marks).
            final_clip_list += attach_overlay(clip_item, clip_info_item['overlay'], width, height)
        else:
            final_clip_list.append(clip_item.resized((width, height)))
    else:
        # Still image shown for a fixed duration.
        clip_item = ImageClip(clip['src']).with_duration(clip['duration'])
        final_clip_list.append(clip_item.resized((width, height)))
    video_duration = clip_item.duration

    key_points = clip_info_item.get('keyPoints')
    if key_points:
        for key_point_item in key_points:
            kp_width = key_point_item['width']
            kp_height = key_point_item['height']
            kp_x = key_point_item['positionX']
            kp_y = key_point_item['positionY']
            kp_src = key_point_item['src']
            if kp_src.endswith(".gif"):
                gif_src = download_file(kp_src, ".gif")
                key_point_clip = VideoFileClip(gif_src, has_mask=True)
                # Loop the GIF until it covers the whole clip, then trim.
                loops = int(video_duration / key_point_clip.duration) + 1
                gif_clips = concatenate_videoclips([key_point_clip] * loops)
                final_clip_list.append(gif_clips.subclipped(0, video_duration)
                                       .with_position((kp_x, kp_y))
                                       .resized((kp_width, kp_height)))
                # BUG FIX: the original registered clip_src/clip_item here,
                # which is a NameError for image clips (clip_src undefined)
                # and leaks the gif temp file and its open clip.
                close_clip_list.append({'path': gif_src, 'clip': key_point_clip})
            else:
                final_clip_list.append(ImageClip(kp_src).with_duration(video_duration)
                                       .with_position((kp_x, kp_y))
                                       .resized((kp_width, kp_height)))

    if 'logo' in clip_info_item:
        # Static logo image over the whole clip.
        logo_info = clip_info_item['logo']
        logo_clip = ImageClip(logo_info['src']).with_duration(video_duration).with_start(0) \
            .with_position((logo_info['positionX'], logo_info['positionY'])) \
            .resized((logo_info['width'], logo_info['height']))
        final_clip_list.append(logo_clip)

    if 'tip' in clip_info_item:
        # Static tip/notice image over the whole clip.
        tip_info = clip_info_item['tip']
        tip_clip = ImageClip(tip_info['src']).with_duration(video_duration).with_start(0) \
            .with_position((tip_info['positionX'], tip_info['positionY'])) \
            .resized((tip_info['width'], tip_info['height']))
        final_clip_list.append(tip_clip)
    return CompositeVideoClip(final_clip_list)

def integrate_video(video_info, out_path):
    """Render the storyboard described by *video_info* into *out_path*.

    Returns True on success, False when there is nothing to render.
    Exceptions propagate to the caller. Temporary downloads and opened
    clips are always released — the original only cleaned up on the
    success path (its except clauses merely re-raised), leaking temp
    files whenever rendering failed.
    """
    close_clip_list = []
    result = False
    try:
        clip_info_list = video_info['clipList']
        manifest_info = video_info['manifest']

        if clip_info_list:
            width = manifest_info['width']
            height = manifest_info['height']
            clip_position = manifest_info.get('clipPosition')
            clip_video_list = [
                integrate_clip_item(item, width, height, clip_position, close_clip_list)
                for item in clip_info_list
            ]
            if len(clip_video_list) == 1:
                final_clip = clip_video_list[0]
            else:
                final_clip = concatenate_videoclips(clip_video_list)
            # Optional title image composited across the full duration.
            if video_info.get('title') is not None:
                title_info = video_info['title']
                title_clip = ImageClip(title_info['src']) \
                    .with_duration(final_clip.duration).with_start(0) \
                    .with_position((title_info['positionX'], title_info['positionY'])) \
                    .resized((title_info['width'], title_info['height']))
                final_clip = CompositeVideoClip((final_clip, title_clip))
            # Optional full-frame image background the video sits inside.
            if video_info.get('backgroundInfo') is not None:
                background_info = video_info['backgroundInfo']
                position_infos = background_info["positionInfos"]
                background_clip = ImageClip(background_info["src"]) \
                    .with_duration(final_clip.duration) \
                    .resized((background_info["width"], background_info["height"]))
                final_clip = final_clip \
                    .with_position((position_infos["positionX"], position_infos["positionY"])) \
                    .resized((position_infos["width"], position_infos["height"]))
                final_clip = CompositeVideoClip((background_clip, final_clip))

            final_clip.write_videofile(out_path, fps=30, threads=2)
            final_clip.close()
            result = True
    finally:
        # Always release clips and delete temp files, error or not.
        for gc_clip in close_clip_list:
            if 'clip' in gc_clip:
                gc_clip['clip'].close()
            if 'path' in gc_clip:
                delete_file(gc_clip['path'])
    return result