import edge_tts
import asyncio
import edge_tts
import os
import tempfile
from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # Allow cross-origin requests (browser frontends served from other origins)

# Directory (under the system temp dir) where generated MP3 files are stored;
# files are served back via the /audio/<filename> route.
AUDIO_DIR = os.path.join(tempfile.gettempdir(), 'edge_tts_audio')
os.makedirs(AUDIO_DIR, exist_ok=True)

@app.route('/tts', methods=['POST'])
def tts():
    """Synthesize speech for the POSTed JSON payload.

    Expected JSON body:
        text (str, required), voice (str), rate (str, e.g. '+0%'),
        pitch (str, e.g. '+0Hz'), volume (str, e.g. '+0%'),
        generate_subtitles (bool).

    Returns JSON {'audio_url': ..., 'vtt': ...} on success,
    {'error': ...} with status 400/500 on failure.
    """
    # silent=True yields None (instead of raising) for a missing or
    # non-JSON body, so such requests get a clean 400 rather than a 500.
    data = request.get_json(silent=True) or {}
    text = data.get('text')
    voice = data.get('voice', 'zh-CN-XiaoyiNeural')  # default: Chinese female voice
    rate = data.get('rate', '+0%')
    pitch = data.get('pitch', '+0Hz')
    volume = data.get('volume', '+0%')
    generate_subtitles = data.get('generate_subtitles', False)

    if not text:
        return jsonify({'error': 'Text is required'}), 400

    try:
        # Each request runs its own event loop; fine for the dev server's
        # one-request-per-thread model.
        audio_file_path, vtt_content = asyncio.run(
            generate_audio(text, voice, rate, pitch, volume, generate_subtitles)
        )
        audio_url = f'/audio/{os.path.basename(audio_file_path)}'
        return jsonify({'audio_url': audio_url, 'vtt': vtt_content})
    except Exception as e:
        app.logger.error(f"TTS Error: {e}")
        return jsonify({'error': str(e)}), 500

@app.route('/audio/<filename>')
def serve_audio(filename):
    """Serve a previously generated audio file out of AUDIO_DIR.

    send_from_directory rejects paths that escape the directory, which
    guards this route against path-traversal filenames.
    """
    audio_root = AUDIO_DIR
    return send_from_directory(audio_root, filename)

async def generate_audio(text, voice, rate, pitch, volume, generate_subtitles):
    audio_file_path = os.path.join(AUDIO_DIR, f"audio_{os.urandom(8).hex()}.mp3")
    vtt_content = None
    word_boundaries = []

    communicate = edge_tts.Communicate(text, voice, rate=rate, pitch=pitch, volume=volume)
    with open(audio_file_path, "wb") as file:
        async for chunk in communicate.stream():
            if chunk["type"] == "audio":
                file.write(chunk["data"])
            elif chunk["type"] == "WordBoundary":
                word_boundaries.append(chunk)

    if generate_subtitles:
        vtt_content = "WEBVTT\n\n"
        app.logger.debug(f"Word Boundaries: {word_boundaries}") # Add this line for debugging
        # Group word boundaries into subtitle cues based on a simple time threshold or sentence end.
        # This is a basic implementation. A more advanced one would involve NLP for sentence segmentation.

        current_cue_text = []
        for i, boundary in enumerate(word_boundaries):
            word_text = boundary.get("text", "").replace('\n', ' ').strip()
            word_start_ms = boundary.get("offset", 0) / 10000 # Convert 100-nanosecond units to milliseconds
            word_duration_ms = boundary.get("duration", 0) / 10000 # Convert 100-nanosecond units to milliseconds
            word_end_ms = word_start_ms + word_duration_ms

            if not current_cue_text: # Start of a new cue
                current_cue_start_ms = word_start_ms

            current_cue_text.append(word_text)

            is_cue_end = False
            # Check for sentence-ending punctuation
            if any(p in word_text for p in ['.', '?', '!', '。', '？', '！']):
                is_cue_end = True

            # If it's the last word, always end the cue
            if i == len(word_boundaries) - 1:
                is_cue_end = True

            # If not ending by punctuation or last word, check for a significant pause
            if not is_cue_end and i < len(word_boundaries) - 1:
                next_word_start_ms = word_boundaries[i+1].get("audio_offset", 0)
                if (next_word_start_ms - word_end_ms > 500): # More than 0.5 seconds pause
                    is_cue_end = True

            # Heuristic: force cue end if current cue text is too long (e.g., more than 10 words)
            # Prioritize sentence-ending punctuation over word count for cue splitting.
            if not is_cue_end and len(current_cue_text) >= 10:
                is_cue_end = True

            if is_cue_end:
                # Format time to HH:MM:SS.mmm
                def format_time(ms):
                    h = int(ms // 3600000)
                    ms %= 3600000
                    m = int(ms // 60000)
                    ms %= 60000
                    s = ms / 1000
                    return f"{h:02}:{m:02}:{s:06.3f}"

                vtt_content += f"{format_time(current_cue_start_ms)} --> {format_time(word_end_ms)}\n"
                vtt_content += f'{" ".join(current_cue_text)}\n'
                vtt_content += '\n'
                current_cue_text = []

    return audio_file_path, vtt_content

if __name__ == '__main__':
    # Flask development server only; debug=True enables the interactive
    # debugger and must not be used in production.
    app.run(debug=True, port=5000)