import os
import time
import ctypes
import threading
import logging
import sdl2
import numpy as np
import pyaudio
import dashscope
from PIL import Image
from callback import Callback
from phoneme_mapping import load_phoneme_mapping, phoneme_to_viseme, DEFAULT_VISEME
from viseme_controller import VisemeController, choose_frames
from lottie_players import LottiePlayerStatic, LottiePlayerDynamicForward, LottiePlayerDynamicReverse
from eye_player import EyesCachePlayer, StaticMouthCache
from text_render import render_text_image, render_viseme_line, pil_image_to_texture
from dashscope.audio.tts import SpeechSynthesizer, SpeechSynthesisResult

# Module-wide logging: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s')
logger = logging.getLogger("lottie_player")

SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ASSET_DIR = os.path.join(SCRIPT_DIR, "assets")

# SECURITY: the DashScope API key was hard-coded (and therefore leaked) here.
# Prefer the DASHSCOPE_API_KEY environment variable; the old literal is kept
# only as a backward-compatible fallback — rotate that key as soon as possible.
dashscope.api_key = os.environ.get("DASHSCOPE_API_KEY", "sk-6def41528a7d4d879b558648cf8bd2b4")
SCREEN_WIDTH, SCREEN_HEIGHT = 720, 720
Eyes_JSON_PATH = os.path.join(ASSET_DIR, "default_eyes_3.json")
Mouth_JSON_PATH = os.path.join(ASSET_DIR, "src", "mouth_8.json")
FONT_PATH = "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc"
TEXT_TO_SYNTHESIZE = "上海是一座充满活力的现代化大都市，从外滩的万国建筑群到陆家嘴的摩天大楼。"

# Shared module-level state: written by the TTS callback thread, read by the
# render loop in main(). Access to the timelines is guarded by viseme_lock.
viseme_lock = threading.Lock()          # guards word_timeline / viseme_timeline
word_timeline = []                      # word-level timestamps from synthesis
viseme_timeline = []                    # viseme events derived from phonemes
start_play_time = None                  # set when audio playback actually starts
start_triggered = False                 # PCM audio-stream detection flag
synthesis_finished = threading.Event()  # signalled when synthesis completes
rendered_words = set()                  # words already rendered on screen

# Callback receives synthesis events and fills the timelines above.
# NOTE(review): removed a duplicate `start_triggered = False` that appeared
# twice in the original module.
cb = Callback(viseme_timeline, word_timeline, viseme_lock, synthesis_finished)

# Per-viseme forward/reverse players and transition animations, keyed by
# mouth-shape name ("M1".."M7") and transition key ("M1_M2", ...).
viseme_forward_players = {}
viseme_reverse_players = {}
transition_players = {}
transition_reverse_players = {}

ASSET_SRC_DIR = os.path.join(ASSET_DIR, "src")

# Load the seven base mouth shapes (forward and reverse playback).
# Consistency fix: use the module logger configured above instead of print.
for i in range(1, 8):
    name = f"M{i}"
    path = os.path.join(ASSET_SRC_DIR, f"{name}.json")
    if os.path.exists(path):
        viseme_forward_players[name] = LottiePlayerDynamicForward(path)
        logger.info("✅ 载入嘴型正播: %s", path)
        viseme_reverse_players[name] = LottiePlayerDynamicReverse(path)
        logger.info("✅ 载入嘴型倒播: %s", path)

# Load pairwise transition animations. Only the from<to direction exists on
# disk; the reverse player covers the opposite transition.
for from_i in range(1, 8):
    for to_i in range(from_i + 1, 8):
        key = f"M{from_i}_M{to_i}"
        path = os.path.join(ASSET_SRC_DIR, f"{key}.json")
        if os.path.exists(path):
            transition_players[key] = LottiePlayerDynamicForward(path)
            transition_reverse_players[key] = LottiePlayerDynamicReverse(path)
            logger.info("✅ 载入过渡动画正播/倒播: %s", key)

# Idle/resting mouth shown before synthesis starts.
default_mouth_player = LottiePlayerStatic(Mouth_JSON_PATH)

def main():
    """Open the SDL window, schedule TTS synthesis, and run the render loop.

    Side effects: initialises SDL video, creates the window/renderer, fires a
    speech-synthesis request 5 s after startup, and renders eyes + mouth
    frames until the window receives SDL_QUIT.
    """
    global static_mouth_cache
    sdl2.SDL_Init(sdl2.SDL_INIT_VIDEO)
    window = sdl2.SDL_CreateWindow(b"LipSync Dynamic", sdl2.SDL_WINDOWPOS_CENTERED, sdl2.SDL_WINDOWPOS_CENTERED,
                                    SCREEN_WIDTH, SCREEN_HEIGHT, sdl2.SDL_WINDOW_SHOWN)
    renderer = sdl2.SDL_CreateRenderer(window, -1, sdl2.SDL_RENDERER_ACCELERATED | sdl2.SDL_RENDERER_PRESENTVSYNC)

    eyes_player = EyesCachePlayer(Eyes_JSON_PATH, renderer)
    static_mouth_cache = StaticMouthCache(default_mouth_player, renderer)
    viseme_controller = VisemeController(
        viseme_forward_players,
        transition_players,
        viseme_reverse_players,
        transition_reverse_players,
        default_mouth_player,
        static_mouth_cache,
        word_timeline,
        viseme_timeline,
    )

    load_phoneme_mapping(os.path.join(ASSET_DIR, "py2phoneMap.txt"))

    # Fire the synthesis request 5 s after startup; the callback fills the
    # timelines and sets cb.start_play_time once audio playback begins.
    threading.Timer(5.0, lambda: SpeechSynthesizer.call(
        model='sambert-zhichu-v1',
        text=TEXT_TO_SYNTHESIZE,
        sample_rate=48000,
        format='pcm',
        word_timestamp_enabled=True,
        phoneme_timestamp_enabled=True,
        callback=cb
    )).start()

    running = True
    while running:
        # BUGFIX: poll events unconditionally so the window can be closed
        # even while idling — previously SDL_QUIT was only processed after
        # cb.start_play_time was set, making the window unresponsive before
        # audio playback started.
        event = sdl2.SDL_Event()
        while sdl2.SDL_PollEvent(ctypes.byref(event)):
            if event.type == sdl2.SDL_QUIT:
                running = False
        if not running:
            break

        if not cb.start_play_time:
            # Idle frame: plain background, blinking eyes, resting mouth.
            # NOTE(review): alpha=1 looks unintentional (255 = opaque), but
            # SDL_RenderClear ignores blending, so the visual result is
            # unchanged — confirm before altering.
            sdl2.SDL_SetRenderDrawColor(renderer, 200, 180, 165, 1)
            sdl2.SDL_RenderClear(renderer)
            eyes_player.render()
            static_mouth_cache.render(renderer)
            sdl2.SDL_RenderPresent(renderer)
            time.sleep(0.01)
            continue

        # Milliseconds since audio playback started; drives viseme timing.
        # (The original fallback to a local start_time was unreachable: the
        # idle branch above continues whenever cb.start_play_time is falsy.)
        delta = time.time() * 1000 - cb.start_play_time

        sdl2.SDL_SetRenderDrawColor(renderer, 200, 180, 165, 1)
        sdl2.SDL_RenderClear(renderer)
        eyes_player.render()
        with viseme_lock:
            viseme_controller.update(viseme_timeline, delta)
            viseme_controller.render_current_frame(renderer)
        sdl2.SDL_RenderPresent(renderer)

    sdl2.SDL_DestroyRenderer(renderer)
    sdl2.SDL_DestroyWindow(window)
    sdl2.SDL_Quit()

if __name__ == "__main__":
    main()