#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
豆包语音TTS客户端
基于字节跳动豆包语音API的文本转语音工具
"""

import json
import os
import requests
import aiohttp
import asyncio
import base64
from typing import Optional, Dict, Any
from pathlib import Path
from dotenv import load_dotenv

# Load environment variables from a local .env file, if present
load_dotenv()


class TTSClient:
    """Doubao (ByteDance) speech TTS client.

    Configuration is read from ``TTS_*`` environment variables (see
    ``_load_config``).  Both a synchronous (``synthesize``) and an
    asynchronous (``synthesize_async``) entry point are provided; both
    return the synthesized audio as raw bytes and can optionally write
    it to a file.
    """

    def __init__(self):
        """Load configuration and prepare a reusable HTTP session.

        Raises:
            ValueError: if the environment configuration is incomplete
                or a numeric variable cannot be parsed.
        """
        self.config = self._load_config()
        self.session = requests.Session()

        # Headers are identical for every request, so install them once
        # on the persistent session.
        self.session.headers.update(self._build_headers())

    def _build_headers(self) -> Dict[str, str]:
        """Return the auth/content headers required by the TTS API.

        Shared by the sync session and each async request so the two
        code paths cannot drift apart.
        """
        return {
            'x-api-key': self.config['api_key'],
            'X-Api-Resource-Id': self.config['api_resource_id'],
            'Connection': 'keep-alive',
            'Content-Type': 'application/json'
        }

    def _load_config(self) -> Dict[str, Any]:
        """Load and validate configuration from environment variables.

        Returns:
            Dict with API credentials, endpoint URL and synthesis
            defaults (speaker, format, sample rate, timeout, limits).

        Raises:
            ValueError: if a required variable is missing, or a numeric
                variable is not a valid integer.
        """
        try:
            config = {
                'api_key': os.getenv('TTS_API_KEY'),
                'api_resource_id': os.getenv('TTS_API_RESOURCE_ID'),
                'base_url': os.getenv('TTS_BASE_URL'),
                'default_speaker': os.getenv('TTS_DEFAULT_SPEAKER', 'zh_male_beijingxiaoye_emo_v2_mars_bigtts'),
                'default_audio_format': os.getenv('TTS_DEFAULT_AUDIO_FORMAT', 'mp3'),
                'default_sample_rate': int(os.getenv('TTS_DEFAULT_SAMPLE_RATE', '24000')),
                'timeout': int(os.getenv('TTS_TIMEOUT', '30')),
                'max_text_length': int(os.getenv('TTS_MAX_TEXT_LENGTH', '1000')),
                'output_directory': os.getenv('TTS_OUTPUT_DIRECTORY', './output')
            }
        except ValueError as e:
            # int() failed on a malformed numeric environment variable.
            raise ValueError(f"环境变量配置加载失败: {e}")

        # Fail fast on missing credentials/endpoint.  Raised directly
        # (not re-wrapped) so the message is not doubled.
        for key in ('api_key', 'api_resource_id', 'base_url'):
            if not config[key]:
                raise ValueError(f"环境变量 TTS_{key.upper()} 未设置")

        return config

    def _get_default_params(self, speaker: Optional[str], audio_format: Optional[str], sample_rate: Optional[int]) -> tuple:
        """Substitute configured defaults for any parameter left as None."""
        return (
            speaker or self.config['default_speaker'],
            audio_format or self.config['default_audio_format'],
            sample_rate or self.config['default_sample_rate']
        )

    def _build_additions(self, kwargs: dict) -> dict:
        """Build the ``additions`` request field.

        Args:
            kwargs: extra keyword arguments forwarded from a synthesis
                call; an optional ``additions`` dict overrides defaults.

        Returns:
            Merged additions dict (caller-supplied values win).
        """
        additions = {
            "disable_markdown_filter": True,
            "enable_language_detector": True,
            "enable_latex_tn": True,
            "disable_default_bit_rate": True,
            "max_length_to_filter_parenthesis": 0,
            "cache_config": {
                "text_type": 1,
                "use_cache": True
            }
        }

        if 'additions' in kwargs:
            additions.update(kwargs['additions'])

        return additions

    def _build_payload(self, text: str, speaker: str, audio_format: str, sample_rate: int, additions: dict) -> dict:
        """Build the JSON request body shared by sync and async paths."""
        return {
            "req_params": {
                "text": text,
                "speaker": speaker,
                # The API expects 'additions' as a JSON-encoded string.
                "additions": json.dumps(additions),
                "audio_params": {
                    "format": audio_format,
                    "sample_rate": sample_rate
                }
            }
        }

    def _process_audio_response(self, response_content: bytes, is_text: bool = True) -> bytes:
        """Extract audio bytes from an API response body.

        The endpoint may stream JSON lines of the form
        ``{"data": "<base64 audio>"}``; those chunks are decoded and
        concatenated.  A body that is not UTF-8 text, or contains no
        such chunks, is assumed to be raw audio and returned as-is.

        Args:
            response_content: raw response body (bytes; str tolerated).
            is_text: attempt JSON-lines parsing when True.

        Returns:
            Audio data as bytes.
        """
        audio_chunks = []

        if is_text:
            try:
                response_text = response_content.decode('utf-8') if isinstance(response_content, bytes) else response_content
                lines = response_text.strip().split('\n')

                for line in lines:
                    if not line.strip():
                        continue

                    try:
                        result = json.loads(line)
                        if isinstance(result, dict) and 'data' in result and result['data']:
                            try:
                                audio_chunks.append(base64.b64decode(result['data']))
                            except Exception:
                                # Malformed base64 chunk: skip it.
                                continue
                    except json.JSONDecodeError:
                        # Non-JSON line in the stream: ignore it.
                        continue

            except UnicodeDecodeError:
                # Body is not text: treat it as raw binary audio.
                return response_content

        if audio_chunks:
            return b''.join(audio_chunks)
        # No streamed chunks found: assume direct audio data.
        return response_content if isinstance(response_content, bytes) else response_content.encode()

    def _save_audio_file(self, audio_data: bytes, output_file: str) -> None:
        """Write audio bytes to *output_file*, creating parent dirs."""
        output_path = Path(output_file)
        output_path.parent.mkdir(parents=True, exist_ok=True)

        with open(output_path, 'wb') as f:
            f.write(audio_data)

    def _validate_text(self, text: str) -> None:
        """Reject empty or over-long input before any network call.

        Raises:
            ValueError: on blank text or text exceeding the configured
                ``max_text_length`` (previously loaded but never used).
        """
        if not text.strip():
            raise ValueError("文本内容不能为空")
        if len(text) > self.config['max_text_length']:
            raise ValueError(
                f"文本长度超过限制: {len(text)} > {self.config['max_text_length']}"
            )

    def synthesize(
        self,
        text: str,
        speaker: Optional[str] = None,
        output_file: Optional[str] = None,
        audio_format: Optional[str] = None,
        sample_rate: Optional[int] = None,
        **kwargs
    ) -> bytes:
        """Synthesize speech synchronously.

        Args:
            text: text to synthesize (non-empty, at most
                ``max_text_length`` characters).
            speaker: voice ID (defaults to configured speaker).
            output_file: optional path to also write the audio to.
            audio_format: audio format (mp3, wav, pcm).
            sample_rate: sample rate (8000, 16000, 24000, 48000).
            **kwargs: extra parameters; an ``additions`` dict overrides
                the default request additions.

        Returns:
            Audio data (bytes).

        Raises:
            ValueError: on invalid input text.
            RuntimeError: on network failure or an invalid response.
        """
        self._validate_text(text)

        speaker, audio_format, sample_rate = self._get_default_params(speaker, audio_format, sample_rate)
        payload = self._build_payload(text, speaker, audio_format, sample_rate, self._build_additions(kwargs))

        try:
            response = self.session.post(
                self.config['base_url'],
                json=payload,
                timeout=self.config['timeout']
            )
            response.raise_for_status()

            # Use response.content (bytes): decoding a binary audio body
            # via response.text would corrupt the audio data.
            audio_data = self._process_audio_response(response.content)

            if not audio_data or len(audio_data) < 100:
                raise ValueError("未找到有效的音频数据")

            if output_file:
                self._save_audio_file(audio_data, output_file)

            return audio_data

        except requests.exceptions.RequestException as e:
            raise RuntimeError(f"网络请求失败: {e}")
        except Exception as e:
            raise RuntimeError(f"语音合成失败: {e}")

    async def synthesize_async(
        self,
        text: str,
        speaker: Optional[str] = None,
        output_file: Optional[str] = None,
        audio_format: Optional[str] = None,
        sample_rate: Optional[int] = None,
        **kwargs
    ) -> bytes:
        """Synthesize speech asynchronously.

        Same contract as ``synthesize``, but uses aiohttp so the
        network wait does not block the event loop.

        Args:
            text: text to synthesize (non-empty, at most
                ``max_text_length`` characters).
            speaker: voice ID (defaults to configured speaker).
            output_file: optional path to also write the audio to.
            audio_format: audio format.
            sample_rate: sample rate.
            **kwargs: extra parameters; an ``additions`` dict overrides
                the default request additions.

        Returns:
            Audio data (bytes).

        Raises:
            ValueError: on invalid input text.
            RuntimeError: on network failure or an invalid response.
        """
        self._validate_text(text)

        speaker, audio_format, sample_rate = self._get_default_params(speaker, audio_format, sample_rate)
        payload = self._build_payload(text, speaker, audio_format, sample_rate, self._build_additions(kwargs))

        try:
            timeout = aiohttp.ClientTimeout(total=self.config['timeout'])
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.post(
                    self.config['base_url'],
                    json=payload,
                    headers=self._build_headers()
                ) as response:
                    response.raise_for_status()
                    response_content = await response.read()

                    audio_data = self._process_audio_response(response_content)

                    if not audio_data or len(audio_data) < 100:
                        raise ValueError(f"音频数据无效或过短: {len(audio_data) if audio_data else 0} 字节")

                    if output_file:
                        self._save_audio_file(audio_data, output_file)

                    return audio_data

        except aiohttp.ClientError as e:
            raise RuntimeError(f"异步网络请求失败: {e}")
        except Exception as e:
            raise RuntimeError(f"异步语音合成失败: {e}")

    def get_available_speakers(self) -> list:
        """Return a sample list of voice IDs.

        Note: this is a hard-coded example; the authoritative list is
        in the official API documentation.
        """
        return [
            "zh_male_beijingxiaoye_emo_v2_mars_bigtts",
            "zh_female_tianmei_emo_v2_mars_bigtts",
            "zh_male_jingqiang_emo_v2_mars_bigtts",
            # See the official documentation for more voices
        ]

    def validate_config(self) -> bool:
        """Check that all essential configuration values are present
        and truthy.

        Returns:
            True if the configuration is usable, False otherwise.
        """
        required_keys = [
            'api_key', 'api_resource_id', 'base_url', 'default_speaker',
            'default_audio_format', 'default_sample_rate'
        ]

        return all(key in self.config and self.config[key] for key in required_keys)