import requests
from flask import Flask, request, Response, jsonify
import time
import threading
import uuid
import conf
from datetime import datetime
import random
from moviepy.editor import ColorClip, AudioClip, CompositeVideoClip,TextClip,ImageClip,AudioFileClip
from moviepy.editor import *
import PIL.Image
from moviepy.video.fx.resize import resize
from moviepy.video.fx.margin import margin
from moviepy.video.fx.freeze import freeze
import os
from urllib.parse import urlparse, urlencode, urlunparse, parse_qs


# Hint downstream native libraries to use OpenCL acceleration where supported.
os.environ["USE_OPENCL"] = "TRUE"
# Echo the ImageMagick binary path (moviepy's TextClip depends on ImageMagick).
# NOTE(review): raises KeyError at import time if IMAGEMAGICK_BINARY is not
# set — confirm it is always exported in the deployment environment.
print(os.environ["IMAGEMAGICK_BINARY"])

def get_time_str():
    """Return the current local time as a compact YYYYMMDDHHMMSS string."""
    return datetime.now().strftime("%Y%m%d%H%M%S")

def encode_url_after_questionmark(url):
    """
    Re-encode the query-string portion of a URL.

    :param url: URL string that may carry a query string.
    :return: the same URL with its query parameters percent/plus encoded.
    """
    parts = urlparse(url)
    params = parse_qs(parts.query)
    # ParseResult is a namedtuple, so _replace swaps just the query component.
    rebuilt = parts._replace(query=urlencode(params, doseq=True))
    return urlunparse(rebuilt)

def send_post_progress(progress_url, data):
    """POST a progress notification (JSON body *data*) to *progress_url*.

    Runs on a background worker thread, outside any Flask request/app
    context, so errors are reported as a plain (dict, status) tuple:
    the old jsonify() call would raise "Working outside of application
    context" here.

    :param progress_url: progress callback URL; query string is re-encoded.
    :param data: JSON-serializable payload.
    :return: None on success, or ({"error": ...}, 500) on a non-200 reply.
    """
    print(f"进度通知: {progress_url}...")
    progress_url = encode_url_after_questionmark(progress_url)
    # NOTE(review): no timeout — a stalled endpoint blocks the worker thread;
    # consider requests.post(..., timeout=...) once acceptable limits are known.
    response = requests.post(progress_url, json=data)
    if response.status_code != 200:
        # response.text is safe even when the body is not JSON; the old
        # response.json() could raise, and "str" + dict was a TypeError.
        print(f"进度通知失败: {progress_url}:{response.text}")
        return {"error": "进度通知失败: " + progress_url + ":" + response.text}, 500
def send_post(callback_url, data):
    """POST the final result payload *data* to *callback_url*.

    Runs on a background worker thread, outside any Flask request/app
    context, so errors are reported as a plain (dict, status) tuple:
    the old jsonify() call would raise "Working outside of application
    context" here.

    :param callback_url: completion callback URL; query string is re-encoded.
    :param data: JSON-serializable payload.
    :return: None on success, or ({"error": ...}, 500) on a non-200 reply.
    """
    print(f"回调: {callback_url}...")
    callback_url = encode_url_after_questionmark(callback_url)
    print(f"回调(编码后): {callback_url}...")
    # NOTE(review): no timeout — a stalled endpoint blocks the worker thread.
    response = requests.post(callback_url, json=data)
    if response.status_code != 200:
        # response.text is safe even when the body is not JSON; the old
        # response.json() could raise, and "str" + dict was a TypeError.
        print(f"回调失败: {callback_url}:{response.text}")
        return {"error": "回调失败: " + callback_url + ":" + response.text}, 500

# 切割视频
def _split_video(video_clip_path, start_time, end_time, without_audio):
    """Open a video file and cut out the [start_time, end_time] segment.

    start_time/end_time are timestamp strings understood by time_to_int;
    when without_audio is True the segment's audio track is stripped.
    """
    clip = VideoFileClip(video_clip_path)
    segment = clip.subclip(time_to_int(start_time), time_to_int(end_time))
    if without_audio == True:
        segment = segment.without_audio()
    return segment

def _cal_duration(start_time, end_time):
    """Return end minus start, both given as timestamp strings (see time_to_int)."""
    return time_to_int(end_time) - time_to_int(start_time)

def time_to_int(start_time):
    """Strip ',' and ':' separators from a timestamp string and parse the
    remaining digits as an int (e.g. "00:01:23" -> 123)."""
    digits = start_time.translate(str.maketrans("", "", ",:"))
    return int(digits)
def _create_caption(captionJson, font):
    """Build a positioned white subtitle TextClip from a caption config dict.

    Expected keys in captionJson: source (text), x, y, fontSize, width,
    height, startTime, endTime (timestamp strings parsed by time_to_int).
    """
    start_str = captionJson["startTime"]
    end_str = captionJson["endTime"]
    clip = TextClip(
        captionJson["source"],
        font=font,
        fontsize=captionJson["fontSize"],
        color='white',
        size=(captionJson["width"], captionJson["height"]),
    )
    clip = (
        clip.set_start(time_to_int(start_str))
            .set_end(time_to_int(end_str))
            .set_duration(_cal_duration(start_str, end_str))
            .set_position((captionJson["x"], captionJson["y"]))
    )
    return clip


def _gen_video_icon(image_path, x, y, width, height, duration):
    """Create an image overlay clip placed at (x, y), resized to
    width x height, lasting *duration* seconds, with a 10px top/left margin."""
    icon = ImageClip(image_path, transparent=True)
    icon = icon.set_duration(duration).set_position((x, y))
    icon = icon.resize((width, height))
    return margin(icon, top=10, left=10)

def _gen_video_cover(config, scene, width, height, font, need_close_clips):
    """Build the optional cover segment for *scene*.

    When scene["cover"] exists: cuts the cover source video (always muted,
    cover music is attached separately), resizes it to the output frame,
    overlays optional caption1/caption2 text and an optional icon,
    composites the layers, and attaches optional cover audio.

    :param config: task id, used only in log messages.
    :param scene: scene configuration dict.
    :param width: output frame width.
    :param height: output frame height.
    :param font: font family used for caption TextClips.
    :param need_close_clips: list collecting clips the caller must close.
    :return: (cover_video, scene_icon_clip); cover_video is None when the
             scene defines no cover, scene_icon_clip is None when the cover
             defines no icon.
    """
    sceneName = scene["name"]
    sceneDuration = scene["duration"]
    cover_video = None
    scene_icon_clip = None
    if "cover" in scene:
        final_cover_clips = []
        cover_obj = scene["cover"]
        # Cut the cover segment from the source video.
        cover_video_path = cover_obj["source"]
        print(f"{config}-正在处理场景封面：{sceneName}-{cover_video_path}")
        cover_start_time_str = cover_obj["coverStartTime"]
        cover_end_time_str = cover_obj["coverEndTime"]
        cover_clip = _split_video(cover_video_path, cover_start_time_str, cover_end_time_str, True)
        cover_clip = cover_clip.resize((width, height))
        final_cover_clips.append(cover_clip)

        need_close_clips.append(cover_clip)
        # Main title.
        if "caption1" in cover_obj:
            c1_clip = _create_caption(cover_obj["caption1"], font)
            c1_clip = c1_clip.subclip(0, cover_clip.duration)
            final_cover_clips.append(c1_clip)
        # Subtitle.
        if "caption2" in cover_obj:
            c2_clip = _create_caption(cover_obj["caption2"], font)
            c2_clip = c2_clip.subclip(0, cover_clip.duration)
            final_cover_clips.append(c2_clip)

        # Icon overlay.
        # BUGFIX: the old code read cover_obj["icon"] unconditionally and
        # raised KeyError for covers that define no icon entry.
        icon = cover_obj.get("icon", {})
        if "source" in icon:
            imageSource = icon["source"]
            imageX = icon["x"]
            imageY = icon["y"]
            imageWidth = icon["width"]
            imageHeight = icon["height"]

            # Icon for the remainder of the scene (scene minus cover length),
            # returned to the caller to overlay on the scene body.
            scene_icon_clip = _gen_video_icon(imageSource, imageX, imageY, imageWidth, imageHeight, sceneDuration - cover_clip.duration)
            # Icon shown during the cover itself.
            icon_clip = _gen_video_icon(imageSource, imageX, imageY, imageWidth, imageHeight, cover_clip.duration)

            final_cover_clips.append(icon_clip)
            print(f"{config}-完成处理场景封面图标：{sceneName}-{imageSource}")

        # Composite the cover layers into one clip.
        cover_video = CompositeVideoClip(final_cover_clips)

        # Cover music.
        if "audio" in cover_obj:
            audio = cover_obj["audio"]
            # BUGFIX: was `if "source" in icon` (copy-paste error), which
            # gated cover audio on the *icon* config instead of the audio config.
            if "source" in audio:
                audioSource = audio["source"]

                audioStartTime = audio["startTime"]
                audioEndTime = audio["endTime"]

                iStartTime = time_to_int(audioStartTime)
                iEndTime = time_to_int(audioEndTime)

                audio_duration = iEndTime - iStartTime
                if audio_duration > cover_clip.duration:
                    # Music is longer than the cover: truncate to cover length.
                    print(f"{config}-封面音乐长度大于封面视频长度，自动截取：{cover_clip.duration}s")
                    iEndTime = iStartTime + cover_clip.duration

                audio_clip = AudioFileClip(audioSource)
                tmp_audio_clip = audio_clip.subclip(iStartTime, iEndTime)

                volume = audio["volume"]
                tmp_audio_clip = tmp_audio_clip.volumex(volume)

                # Attach the music to the composited cover.
                cover_video = cover_video.set_audio(tmp_audio_clip)

                need_close_clips.append(audio_clip)
                need_close_clips.append(tmp_audio_clip)

                print(f"{config}-完成处理场景封面音乐：{sceneName}-{audioSource}")
        # BUGFIX: was appended unconditionally, putting None into the cleanup
        # list when the scene has no cover (spurious close-failure logging).
        need_close_clips.append(cover_video)
    return cover_video, scene_icon_clip



def _gen_video_scene(config,scene,width,height,scene_icon_clip,rest_duration,gened_duration,font,need_close_clips):
  """Build the main (non-cover) composite clip for one scene.

  Iterates scene["sourceList"] and handles four source types:
  "video" (cut + resized segments), "caption" (timed TextClips, padded with
  transparent filler clips), "voice" (spoken audio) and "audio" (background
  music). Video segments are concatenated, captions and the scene icon are
  overlaid, then the audio/voice tracks are mixed onto the result.

  :param config: task id, used only in log messages.
  :param scene: scene configuration dict.
  :param width: output frame width.
  :param height: output frame height.
  :param scene_icon_clip: optional icon overlay produced by _gen_video_cover.
  :param rest_duration: remaining seconds available in the output video.
  :param gened_duration: seconds already generated (currently unused here).
  :param font: font family for caption TextClips.
  :param need_close_clips: list collecting clips the caller must close.
  :return: the composed scene clip, or None when scene has no "sourceList".
  """
  sceneName = scene["name"]
  sceneDuration=scene["duration"]
  if rest_duration<sceneDuration:
      # Scene is longer than the remaining budget: clamp it.
      print(f"{config}-{sceneName}场景时长大于剩余时长，自动截取：{rest_duration}s")
      sceneDuration=rest_duration

  # Collectors for each source type found in the scene.
  scene_video=None
  source_video_clips=[]    
  source_caption_clips=[]
  source_audio_clips=[]
  source_voice_clips=[]
  if "sourceList" in scene:
      sourceList = scene["sourceList"]
      for source in sourceList:
          sourceType = source["type"]

          print(f"{config}-{sceneName}正在处理素材：{source['source']}")

          if sourceType == "video":
              # Video segment: cut [startTime, startTime+duration] out of the
              # source file and resize it to the output frame.
              source_path = source["source"]
              source_start_time_str = source["startTime"]
              source_duration = source["duration"]
              source_with_voice = source["voice"]=="1"

              # NOTE(review): time_to_int strips ":"/"," and parses digits, so
              # the value only equals seconds for sub-minute timestamps —
              # confirm the expected timestamp format.
              source_start_time=time_to_int(source_start_time_str)
              source_end_time=source_start_time+source_duration

              source_video_clip = VideoFileClip(source_path)
              target_video_clip = source_video_clip.subclip(source_start_time, source_end_time)

              if source_with_voice==False:
                  target_video_clip=target_video_clip.without_audio()
              
              # Match the output frame size.
              target_video_clip = target_video_clip.resize((width, height))
              source_video_clips.append(target_video_clip)

              # NOTE(review): source_video_clip itself is never registered
              # for closing — possible reader leak; verify intent.
              need_close_clips.append(target_video_clip)

          if sourceType == "caption":
              # Subtitle rendered as a TextClip.
              playVoice = source["voice"] == "1"
              if playVoice:
                  # Text-to-speech for captions is not implemented yet.
                  print(f"{config}-{sceneName}字幕转音频（暂未实现）：{source['source']}")
              text=source["source"]
              x=source["x"]
              y=source["y"]
              fontSize=source["fontSize"]

              start_time_str = source["startTime"]
              source_duration = source["duration"]
              
              caption_start_time=time_to_int(start_time_str)
              caption_end_time = caption_start_time + source_duration

              # Sum of the caption clips appended so far; assumes captions
              # arrive in chronological order (TODO confirm).
              added_caption_durations=0
              for added_caption_clip in source_caption_clips:
                  added_caption_durations+=added_caption_clip.duration
             
              if caption_start_time>added_caption_durations:
                  # There is a gap before this caption starts: insert a
                  # fully transparent filler clip to keep timing aligned.
                  tmp_duration=caption_start_time-added_caption_durations
                  tmp_clip = ColorClip(size=(width, height), color=(0, 0, 0), duration=tmp_duration)
                  tmp_clip = tmp_clip.set_opacity(0)
                  source_caption_clips.append(tmp_clip)
              print(f"生成语音字幕:{text}")
              try:
                subtitle_clip = TextClip(text, font=font, fontsize=fontSize, color='white', size=(width, height))
                subtitle_clip = subtitle_clip.set_start(caption_start_time)
                subtitle_clip = subtitle_clip.set_end(caption_end_time)
                subtitle_clip = subtitle_clip.set_duration(source_duration)
                subtitle_clip = subtitle_clip.set_position((x,y))
                source_caption_clips.append(subtitle_clip)
              except Exception as e:
                # Best-effort: a failed TextClip (e.g. ImageMagick error)
                # skips this caption instead of aborting the scene.
                print(f"生成语音字幕错误:{text}:{e}")

          if sourceType == "voice":
              # Spoken-audio track cut from a source file.
              source_start_time_str = source["sourceStartTime"]
              source_end_time_str = source["sourceEndTime"]
              # When playback of this audio starts/ends within the scene.
              start_time_str = source["startTime"]
              end_time_str = source["endTime"]

              start_time=time_to_int(start_time_str)
              end_time=time_to_int(end_time_str)

              audioSource=source["source"]
              audio_clip = AudioFileClip(audioSource)
              if audio_clip.duration< end_time:
                  # Audio material is shorter than the requested end: clamp
                  # the end to the material's length.
                  # NOTE(review): compares a time_to_int value against
                  # clip.duration (seconds) — only consistent for sub-minute
                  # timestamps; confirm.
                  end_time= audio_clip.duration

              # Playback duration within the scene.
              audio_duration=end_time-start_time

              if audio_duration<=0:
                  # Nothing left to play — skip this source.
                  print(f"{config}-{sceneName}口播素材时间不足，忽略：{source['source']}")
                  continue


              if start_time<sceneDuration:
                  # Only playable when it starts before the scene ends.
                  if end_time>sceneDuration:
                      # Ends after the scene: trim the overhang.
                      audio_duration= audio_duration- (end_time-sceneDuration)

              # Cut the requested piece of the source material.
              source_start_time=time_to_int(source_start_time_str)
              source_end_time=time_to_int(source_end_time_str)

              sub_duration=source_end_time-source_start_time
              if sub_duration>audio_duration:
                  # Requested cut exceeds the playable window: shorten it.
                  source_end_time=source_start_time+audio_duration
                  
              # Extract and scale the audio segment.
              tmp_audio_clip = audio_clip.subclip(source_start_time, source_end_time)

              volume=source["volume"]
              tmp_audio_clip=tmp_audio_clip.volumex(volume)
              source_voice_clips.append(tmp_audio_clip)  

              need_close_clips.append(tmp_audio_clip)
              need_close_clips.append(audio_clip)

          if sourceType == "audio":
              # Background-music track; same windowing logic as "voice" but
              # without clamping to the material's real duration.
              source_start_time_str = source["sourceStartTime"]
              source_end_time_str = source["sourceEndTime"]
              # When playback of this audio starts/ends within the scene.
              start_time_str = source["startTime"]
              end_time_str = source["endTime"]

              start_time=time_to_int(start_time_str)
              end_time=time_to_int(end_time_str)
              # Playback duration within the scene.
              audio_duration=end_time-start_time

              if start_time<sceneDuration:
                  # Only playable when it starts before the scene ends.
                  if end_time>sceneDuration:
                      # Ends after the scene: trim the overhang.
                      audio_duration= audio_duration- (end_time-sceneDuration)

              # Cut the requested piece of the source material.
              source_start_time=time_to_int(source_start_time_str)
              source_end_time=time_to_int(source_end_time_str)

              sub_duration=source_end_time-source_start_time
              if sub_duration>audio_duration:
                  # Requested cut exceeds the playable window: shorten it.
                  source_end_time=source_start_time+audio_duration
                  
              # Extract and scale the audio segment.
              audioSource=source["source"]
              audio_clip = AudioFileClip(audioSource)
              tmp_audio_clip = audio_clip.subclip(source_start_time, source_end_time)
              
              volume=source["volume"]
              tmp_audio_clip=tmp_audio_clip.volumex(volume)
              source_audio_clips.append(tmp_audio_clip)

              need_close_clips.append(tmp_audio_clip)
              need_close_clips.append(audio_clip)

          print(f"{config}-{sceneName}完成处理素材：{source['source']}")
      # Concatenate the scene's video segments.
      # NOTE(review): raises if the scene has no "video" sources
      # (concatenate_videoclips on an empty list) — confirm configs always
      # include at least one video source.
      source_video= concatenate_videoclips(source_video_clips) 
     
      # Layers to overlay on top of the base video.
      compsoite_clips=[source_video]
      if scene_icon_clip!=None:
          compsoite_clips.append(scene_icon_clip)

      # NOTE(review): same empty-list concern when no captions exist; the
      # None check below can never be False since concatenate_videoclips
      # never returns None.
      source_caption= concatenate_videoclips(source_caption_clips)
      if source_caption!=None:
          compsoite_clips.append(source_caption)

      # Clamp to the scene's allotted seconds.
      # NOTE(review): target_duration is computed but not applied to the
      # CompositeVideoClip below (the duration= argument was dropped) —
      # verify whether the clamp is still intended.
      target_duration=source_video.duration
      if target_duration>sceneDuration:
        print(f"{config}-场景限制长度小于片段长度，自动截取：{sceneDuration}s")
        target_duration=sceneDuration

      scene_video = CompositeVideoClip(compsoite_clips)

      # Mix background music first, then overlay voice on top of it.
      source_audio=None
      if len(source_audio_clips)>0:
        source_audio= concatenate_audioclips(source_audio_clips)
        scene_video=scene_video.set_audio(source_audio)

      if len(source_voice_clips)>0:
        source_voice= concatenate_audioclips(source_voice_clips)
        if source_audio!=None:
          source_audio= CompositeAudioClip([source_audio,source_voice])
        else:
          source_audio=source_voice
        scene_video=scene_video.set_audio(source_audio)

      need_close_clips.append(scene_video)
  return scene_video

# 生成视频实现
def _gen_video_impl(config, jsonObj, callback_url, progress_url):
    print(f"{config}-开始生成视频...")
    print(f"{config}-生成配置：", jsonObj)

    videoCount = jsonObj["videoCount"]
    width = jsonObj["videoSize"]["width"]
    height = jsonObj["videoSize"]["height"]
    videoMinDuration = jsonObj["videoMinSecs"]
    videoMaxDuration = jsonObj["videoMaxSecs"]
    fps = jsonObj["fps"]
    font= jsonObj["fontFamily"]
    outputPath = jsonObj["output"]
    if outputPath=="":
        outputPath="."
    scenes = jsonObj["scenes"]

    genedVideoList = []
    

    for curVideoIndex in range(videoCount):
        print(f"{config}-正在生成第 {curVideoIndex + 1} 个视频...")
        time.sleep(1)
        # 输出视频时长
        output_duration = random.randint(videoMinDuration, videoMaxDuration)
        # tmp_total_duration=0
        print(f"{config}-正在生成第 {curVideoIndex + 1} 个视频, 目标视频时长：{output_duration}s")

        # video_clip = ColorClip(size=(width, height), color=(0, 0, 0), duration=0)
        # 视频所有片段
        need_close_clips=[]
        video_clips=[]
        gened_duration=0
        for scene in scenes:
            sceneName = scene["name"]
            # sceneDuration=scene["duration"]
            # if tmp_total_duration+sceneDuration>output_duration:
            #     print(f"{config}-忽略场景，已满足视频长度：{output_duration} 场景: {sceneName}  场景长度{sceneDuration}")
            #     break
            # tmp_total_duration=tmp_total_duration+sceneDuration

            

            scene_clips=[]
            # 处理封面(每个场景都支持封面，可以不设置)
            reset_duration=output_duration-gened_duration
            if reset_duration<=0:
                print(f"{config}-忽略场景，已满足视频长度：{output_duration} 场景: {sceneName}")
                break

            cover_video,scene_icon_clip=_gen_video_cover(config,scene,width,height,font,need_close_clips)
            if cover_video!=None:
                scene_clips.append(cover_video)
                gened_duration+=cover_video.duration

                
            # 场景视频   
            reset_duration=output_duration-gened_duration
            if reset_duration<=0:
                print(f"{config}-忽略场景，已满足视频长度：{output_duration} 场景: {sceneName}")
                break 
            scene_video=_gen_video_scene(config,scene,width,height,scene_icon_clip,reset_duration,gened_duration,font,need_close_clips)
            scene_clips.append(scene_video)
            # scene_video.write_videofile("output/tmp2.mp4", codec='libx264')

            gened_duration+=scene_video.duration

            if len(scene_clips)>0:
                # 总片段追加这个场景的片段
                video_clips=video_clips+scene_clips

            print(f"{config}-完成处理场景封面：{sceneName}")
 

        # 输出视频文件路径
        file_name = get_time_str()
        output_video_filename = os.path.join(outputPath, f"{file_name}.mp4")
        output_video_filename=output_video_filename.replace("\\","/")

        # 合成片段
        final_clip = concatenate_videoclips(video_clips) 
        if final_clip.duration<output_duration:
            print(f"{config}-视频长度不足，自动调整为片段最大长度：{final_clip.duration}s")
            output_duration=final_clip.duration

        final_clip = final_clip.subclip(0, output_duration)
        # 帧率
        final_clip.fps=fps
        # 输出
        try:
            # 写入视频文件
            # final_clip.write_videofile(output_video_filename, codec='libx264')
            final_clip.write_videofile(output_video_filename, threads=16,verbose=False, codec='h264_nvenc', ffmpeg_params=['-preset', 'fast','-b:v','8000k'])
        finally:
            # 关闭视频文件
            final_clip.close()
        for tmp_clip in need_close_clips:
            try:
                tmp_clip.close()
            except Exception as e:
                print(f"关闭片段失败:{e}")

        # 回调数据
        genedVideoList.append(output_video_filename)
        print(f"{config}-第 {curVideoIndex + 1} 个视频生成完成.")
        send_post_progress(progress_url, {"id": config, "progress": divide_and_format(curVideoIndex, videoCount)})

    print(f"{config}-生成视频完成.")
    send_post_progress(progress_url, {"id": config, "progress": "100"})
    # 生成完成，通知调用方
    notifyContent = {"id": config, "genedVideoList": genedVideoList}
    send_post(callback_url, notifyContent)
    print(f"{config}-已发送回调.")

def divide_and_format(a: int, b: int) -> str:
    """Return a / b formatted with exactly two decimal places (e.g. 1/2 -> "0.50")."""
    return f"{a / b:.2f}"

def gen_video_impl(config, jsonStr, callback_url, progress_url):
    """Kick off video generation on a background thread and return immediately.

    The heavy lifting happens in _gen_video_impl; this wrapper only spawns
    the worker thread so the HTTP handler is not blocked.
    """
    print(f"{config}-创建生成视频任务...")
    worker = threading.Thread(
        target=_gen_video_impl,
        args=(config, jsonStr, callback_url, progress_url),
    )
    worker.start()
    print(f"{config}-已创建生成视频任务")


def get_video_length(file_path):
    """Return the duration of a video file as a string (seconds).

    :param file_path: path to a video file readable by moviepy.
    :return: the clip's duration, stringified (e.g. "12.34").
    """
    source_video_clip = VideoFileClip(file_path)
    try:
        # Read duration inside try/finally so the underlying reader is
        # released even if the attribute access fails (old code leaked it).
        duration = source_video_clip.duration
    finally:
        source_video_clip.close()
    return f"{duration}"
