import json
from http import HTTPStatus
from urllib.parse import urlparse, unquote
from pathlib import PurePosixPath
import requests
from dashscope import ImageSynthesis
import pyttsx3
from moviepy import ImageClip, AudioFileClip, concatenate_videoclips, CompositeVideoClip
from moviepy.video.tools.subtitles import SubtitlesClip, TextClip
from openai import OpenAI
from dotenv import load_dotenv
import os
import asyncio

# Load configuration (API keys, base URL, model name) from a local .env file.
load_dotenv()

# OpenAI-compatible chat client pointed at DashScope's endpoint; used by all
# text-generation calls in FairytaleVideo.
llm = OpenAI(
    base_url=os.getenv("DASHSCOPE_BASE_URL"),
    api_key=os.getenv("DASHSCOPE_API_KEY"),
)

# Name of the chat model to use for every completion request.
# NOTE(review): None if TEXT_LLM is unset — the API call would then fail; confirm .env always defines it.
text_llm = os.getenv("TEXT_LLM")

class FairytaleVideo:
    """End-to-end pipeline that turns a topic into a narrated, subtitled video.

    Stages (see :meth:`run`):
      1. generate a fairytale for the topic,
      2. split it into short single-event fragments,
      3. build a text-to-image prompt per fragment,
      4. render one image per prompt (saved under ``images/``),
      5. synthesize narration audio per fragment (saved under ``audios/``),
      6. compose images + subtitles + audio into ``output.mp4``.

    Relies on the module-level ``llm`` client and ``text_llm`` model name.
    Assumes the ``images/`` and ``audios/`` directories already exist.
    """

    # Upper bound on image-generation attempts per prompt. The original code
    # retried unconditionally (`while True`) and could hang forever on a
    # persistently failing request.
    MAX_IMAGE_RETRIES = 3

    def __init__(self):
        # Text-to-speech engine used by _generate_audio.
        self.audio_engine = pyttsx3.init()

    async def run(self, topic: str) -> str:
        """Run the full pipeline for *topic*; returns the generated story text."""
        # 1. Generate the fairytale.
        fairytale = await self._generate_fairytale(topic)
        # 2. Split the story into short fragments (one event each).
        sentences = await self._split_fairytale(fairytale)
        # 3. Build a text-to-image prompt for every fragment.
        prompts = await self._generate_prompts(sentences)
        # 4. Render one image per prompt.
        await self._generate_images(prompts)
        # 5. Synthesize narration audio for every fragment.
        await self._generate_audio(sentences)
        # 6. Compose images, subtitles and audio into the final video.
        await self._generate_video(sentences)
        return fairytale

    async def _generate_fairytale(self, topic: str) -> str:
        """Ask the LLM to write a complete fairytale for *topic*."""
        response = llm.chat.completions.create(
            model=text_llm,
            messages=[{
                "role": "system",
                "content": "你是专业的童话故事大师，能够根据用户输入的主题创作出童话故事完整内容。如果用户输入的主题所涉及的故事在历史中存在过，那么就按照历史故事大纲来；否则就自行构造故事情节。"
            }, {
                "role": "user",
                "content": f"帮我生成一个主题为: “{topic}”的童话故事完整内容。故事内容要尽量详细，不可省略剧情。只要返回故事内容，不要返回任何其他的额外信息。"
            }]
        )
        return response.choices[0].message.content

    @staticmethod
    def _as_list(parsed) -> list[str]:
        """Normalize a json_object-mode response into a flat list.

        With ``response_format={"type": "json_object"}`` the model must emit a
        JSON *object*, so the requested array is usually wrapped, e.g.
        ``{"fragments": [...]}``. Accept either a bare list or the first
        list-valued entry of a dict; raise ValueError otherwise.
        """
        if isinstance(parsed, list):
            return parsed
        if isinstance(parsed, dict):
            for value in parsed.values():
                if isinstance(value, list):
                    return value
        raise ValueError(f"Unexpected JSON structure from model: {parsed!r}")

    async def _split_fairytale(self, fairytale: str) -> list[str]:
        """Split the story into short, single-event fragments."""
        response = llm.chat.completions.create(
            model=text_llm,
            messages=[{
                "role": "user",
                "content": f"故事内容为：{fairytale}，请按照故事的情节，将故事内容划分为多个片段，每个片段仅描述一件事，且宜短不宜长，然后将这些片段内容直接放到JSON格式的数组中返回。"
            }],
            response_format={"type": "json_object"}
        )
        # json_object mode may wrap the array in an object; unwrap defensively.
        return self._as_list(json.loads(response.choices[0].message.content))

    async def _generate_prompts(self, sentences: list[str]) -> list[str]:
        """Generate one text-to-image prompt per story fragment."""
        response = llm.chat.completions.create(
            model=text_llm,
            messages=[{
                "role": "system",
                "content": "你是专业的“大模型文生图”的提示词生成专家。能够根据用户输入的故事片段，结合故事情节，为每个片段生成“文生图”提示词，提示词只描述场景，不描述故事内容，场景描述要尽量详细，包括场景中的人物形象、背景、其他元素等。提示词描述的图片要童话风格，角色在每个提示词中的描述应该整体保持一致！"
            }, {
                "role": "user",
                "content": f"用户输入的剧情片段为：{sentences}，请为每个片段生成“文生图”提示词，并将这些提示词直接放到JSON格式的数组中返回。"
            }],
            response_format={"type": "json_object"}
        )
        prompts = self._as_list(json.loads(response.choices[0].message.content))
        # Persist the prompts so a failed run can be resumed / debugged.
        with open("prompts.json", "w", encoding="utf-8") as f:
            json.dump(prompts, f, ensure_ascii=False)
        return prompts

    async def _generate_images(self, prompts: list[str]) -> None:
        """Render one image per prompt into ``images/<index><ext>``.

        Retries each request up to MAX_IMAGE_RETRIES times, then skips the
        fragment (the original looped forever on a non-OK response).
        """
        for index, prompt in enumerate(prompts):
            for _attempt in range(self.MAX_IMAGE_RETRIES):
                print(f'----生成第{index}张图片，提示词为: {prompt}----')
                rsp = ImageSynthesis.call(api_key=os.getenv("DASHSCOPE_API_KEY"),
                                          model="wanx2.1-t2i-turbo",
                                          prompt=prompt + "图片风格为童话绘本类型，不要写实!",
                                          n=1,
                                          size='1440*810')
                print('response: %s' % rsp)
                if rsp.status_code == HTTPStatus.OK and rsp.output.results:
                    result = rsp.output.results[0]
                    # Keep the remote file's extension (.png / .jpg / ...),
                    # taken from the last path component of the result URL.
                    filename = PurePosixPath(unquote(urlparse(result.url).path)).parts[-1]
                    ext = os.path.splitext(filename)[-1]
                    with open(f'images/{index}{ext}', 'wb') as f:
                        f.write(requests.get(result.url).content)
                    break
                # Failure: either a non-OK status or an empty result list.
                print('sync_call Failed, status_code: %s, code: %s, message: %s' %
                      (rsp.status_code, rsp.code, rsp.message))
            else:
                print(f"警告：第{index}张图片生成失败，已跳过")

    async def _generate_audio(self, sentences: list[str]) -> None:
        """Synthesize narration for each fragment into ``audios/<index>.mp3``.

        NOTE(review): pyttsx3 writes the driver's native format (often
        WAV/AIFF) regardless of the .mp3 extension — moviepy sniffs content
        so playback works, but confirm on the target platform.
        """
        for index, sentence in enumerate(sentences):
            self.audio_engine.save_to_file(sentence, f"audios/{index}.mp3")
            self.audio_engine.runAndWait()
            print(f"生成了第{index}个音频")

    def _find_image(self, index: int):
        """Return the path of the rendered image for fragment *index*, or None.

        _generate_images saves with whatever extension the result URL had, so
        probe the common ones instead of assuming ``.png``.
        """
        for ext in (".png", ".jpg", ".jpeg"):
            path = f"images/{index}{ext}"
            if os.path.exists(path):
                return path
        return None

    async def _generate_video(self, sentences: list[str]) -> None:
        """Compose images, narration audio and subtitles into ``output.mp4``.

        Fragments missing their image or audio file are skipped. The
        concatenation and final render run exactly once, after the loop —
        the original re-rendered the entire video on every iteration.
        """
        image_clips = []
        subtitles = []
        subtitle_start_time = 0.0

        for index, sentence in enumerate(sentences):
            image_path = self._find_image(index)
            if image_path is None:
                print(f"警告：片段 {index} 的图片不存在，跳过片段 {index}")
                continue

            audio_path = f"audios/{index}.mp3"
            if not os.path.exists(audio_path):
                print(f"警告：音频 {audio_path} 不存在，跳过片段 {index}")
                continue

            # Each image is shown for exactly the duration of its narration.
            audio_clip = AudioFileClip(audio_path)
            duration = audio_clip.duration
            image_clip = ImageClip(img=image_path, duration=duration)
            image_clip.audio = audio_clip
            image_clips.append(image_clip)

            # Wrap long subtitles onto two lines (threshold: 40 characters).
            if len(sentence) > 40:
                mid = len(sentence) // 2
                sentence = sentence[:mid] + "\n" + sentence[mid:]
            subtitles.append(((subtitle_start_time, subtitle_start_time + duration), sentence))
            subtitle_start_time += duration

        if not image_clips:
            print("错误：未生成任何图片，无法合成视频")
            return

        # Stitch all per-fragment clips into one timeline.
        video_clip = concatenate_videoclips(image_clips)
        # Factory for styled subtitle TextClips.
        generator = lambda text: TextClip(text=text, font='./fonts/ariblk.ttf',
                                          font_size=30, color='white',
                                          stroke_width=2, stroke_color="black")
        subtitle_clip = SubtitlesClip(subtitles, make_textclip=generator, encoding='utf-8')
        # Pin subtitles horizontally centered, 100px above the bottom edge.
        subtitle_clip.pos = lambda t: ("center", video_clip.h - 100)

        # Overlay subtitles on top of the image track and render once.
        video = CompositeVideoClip([video_clip, subtitle_clip])
        video.write_videofile(filename="output.mp4", fps=24)
        print("视频制作完成!")

async def main():
    """Entry point: build a fairytale video for the demo topic 小红帽."""
    generator = FairytaleVideo()
    # run() drives the whole pipeline; only print when it returns story text
    # (the original unconditionally printed None, run()'s implicit return).
    story = await generator.run("小红帽")
    if story:
        print(story)

if __name__ == '__main__':
    asyncio.run(main())