# speech_interaction_baidu.py - 使用百度语音识别API替换豆包
import io
import os
import logging
import tempfile
import json
import base64
import asyncio
import requests
import numpy as np
import time
import uuid
import wave
from typing import Dict, Any, List, Optional, Union, Tuple
from pydantic import BaseModel
from dotenv import load_dotenv

import logging
logger = logging.getLogger(__name__)

# 安装百度AI SDK: pip install baidu-aip
try:
    from aip import AipSpeech
except ImportError:
    print("请安装百度AI SDK: pip install baidu-aip")
    AipSpeech = None

# Load environment variables from a local .env file, if present.
load_dotenv()

# Baidu speech credentials; the "your_*" placeholders double as sentinels that
# is_available() checks to detect unconfigured deployments.
BAIDU_APP_ID = os.getenv("BAIDU_APP_ID", "your_app_id")
BAIDU_API_KEY = os.getenv("BAIDU_API_KEY", "your_api_key")
BAIDU_SECRET_KEY = os.getenv("BAIDU_SECRET_KEY", "your_secret_key")

# Set up logging
# NOTE(review): basicConfig at import time configures the root logger for the
# whole process — confirm this module is meant to own logging setup.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Constants
SUPPORTED_DIALECTS = ["mandarin", "cantonese", "sichuan", "standard", "english"]
DEFAULT_DIALECT = "mandarin"
AUDIO_SAMPLE_RATE = 16000
MAX_AUDIO_LENGTH = 60  # seconds

# Dialect -> Baidu dev_pid (recognition model) mapping.
# Per Baidu ASR docs: 1536 = Mandarin search model (simple English supported),
# 1637 = Cantonese, 1737 = English, 1837 = Sichuan dialect.
BAIDU_DIALECT_MAPPING = {
    "mandarin": 1536,   # Mandarin (supports simple English)
    "cantonese": 1637,  # Cantonese
    "sichuan": 1837,    # Sichuan dialect (fixed: 1737 is Baidu's English model)
    "standard": 1536,   # standard Chinese
    "english": 1737,    # English
}

# Baidu TTS voice ids ("per" parameter) available per dialect.
BAIDU_VOICE_MAPPING = {
    "mandarin": [0, 1, 3, 4],  # female, male, Du Xiaoyao, Du Yaya
    "cantonese": [0],          # female
    "english": [0, 1],         # female, male
}


# Models for request/response (保持与原有接口一致)
class SpeechRecognitionRequest(BaseModel):
    """Request body for speech-to-text: base64 audio plus an optional dialect."""
    audio_base64: str  # base64-encoded audio payload (decoded via AudioUtils)
    dialect: Optional[str] = DEFAULT_DIALECT  # one of SUPPORTED_DIALECTS, or "auto" to detect


class SpeechSynthesisRequest(BaseModel):
    """Request body for text-to-speech."""
    text: str  # text to synthesize
    dialect: Optional[str] = DEFAULT_DIALECT  # target dialect / voice family
    voice_id: Optional[str] = None  # numeric string mapped onto Baidu's 'per' parameter
    speed: Optional[float] = 1.0  # 1.0 = normal; scaled onto Baidu's 0-15 'spd' range


class DialectDetectionRequest(BaseModel):
    """Request body for dialect detection from raw audio."""
    audio_base64: str  # base64-encoded audio to analyze


class SpeechRecognitionResponse(BaseModel):
    """Result of speech-to-text."""
    transcription: str  # recognized text, or a bracketed placeholder on failure
    confidence: Optional[float] = None  # fixed 0.8 on the Baidu path (API reports none)
    dialect: Optional[str] = None  # dialect actually used for recognition
    language_code: Optional[str] = None  # Baidu dev_pid, stringified


class SpeechSynthesisResponse(BaseModel):
    """Result of text-to-speech."""
    audio_base64: str  # base64-encoded audio (mp3 from Baidu TTS, wav from the fallback)
    duration: Optional[float] = None  # estimated duration in seconds


class DialectInfo(BaseModel):
    """A candidate dialect paired with its detection confidence (0.0 - 1.0)."""
    dialect: str  # dialect key, e.g. "mandarin"
    confidence: float  # higher means a more likely match



class AudioUtils:
    """Utility functions for audio processing: base64 codecs, temp files, WAV info."""

    @staticmethod
    def decode_audio_base64(audio_base64: str) -> bytes:
        """Decode base64-encoded audio into raw bytes.

        Raises:
            ValueError: if the input is not valid base64.
        """
        try:
            return base64.b64decode(audio_base64)
        except Exception as e:
            logger.error(f"Error decoding audio: {str(e)}")
            raise ValueError(f"Invalid audio data: {str(e)}") from e

    @staticmethod
    def encode_audio_base64(audio_bytes: bytes) -> str:
        """Encode raw audio bytes to a base64 string.

        Raises:
            ValueError: if encoding fails (e.g. input is not bytes-like).
        """
        try:
            return base64.b64encode(audio_bytes).decode('utf-8')
        except Exception as e:
            logger.error(f"Error encoding audio: {str(e)}")
            raise ValueError(f"Invalid audio data: {str(e)}") from e

    @staticmethod
    def save_temp_audio(audio_data: bytes, file_ext: str = ".wav") -> str:
        """Write audio bytes to a new temporary file and return its path.

        The file is created with delete=False — the caller owns cleanup.

        Raises:
            ValueError: if the file cannot be written.
        """
        try:
            with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as temp_file:
                temp_file.write(audio_data)
                return temp_file.name
        except Exception as e:
            logger.error(f"Error saving temporary audio: {str(e)}")
            raise ValueError(f"Failed to save audio data: {str(e)}") from e

    @staticmethod
    def get_wav_duration(wav_file: str) -> float:
        """Return the duration of a WAV file in seconds, or 0.0 if unreadable."""
        try:
            with wave.open(wav_file, 'rb') as wav:
                return wav.getnframes() / float(wav.getframerate())
        except Exception as e:
            logger.error(f"Error getting WAV duration: {str(e)}")
            return 0.0

    @staticmethod
    def convert_to_pcm(audio_path: str) -> str:
        """Return a path to audio suitable for Baidu ASR.

        Currently a passthrough: a 16 kHz mono WAV is returned as-is, and any
        other input is returned unconverted (real transcoding would require
        e.g. pydub — see EnhancedAudioUtils.convert_to_baidu_format).
        """
        try:
            if audio_path.lower().endswith('.wav'):
                with wave.open(audio_path, 'rb') as wav:
                    if wav.getframerate() == 16000 and wav.getnchannels() == 1:
                        return audio_path  # already in the right format
            # No converter wired up yet: fall back to the original file.
            # (Removed a dead local that computed an unused "_pcm.wav" path.)
            return audio_path
        except Exception as e:
            logger.error(f"Error converting to PCM: {str(e)}")
            return audio_path

class EnhancedAudioUtils(AudioUtils):
    """Audio utilities with format validation and conversion for Baidu ASR."""

    @staticmethod
    def validate_audio_format(audio_path: str) -> Optional[dict]:
        """Inspect a WAV file against Baidu ASR's format requirements.

        Returns a dict with channels/sample_width/framerate/frames/duration and
        a 'baidu_compatible' flag, or None when the file cannot be read as WAV.
        """
        try:
            with wave.open(audio_path, 'rb') as wav:
                format_info = {
                    'channels': wav.getnchannels(),
                    'sample_width': wav.getsampwidth(),
                    'framerate': wav.getframerate(),
                    'frames': wav.getnframes(),
                    'duration': wav.getnframes() / wav.getframerate(),
                }

                # Baidu ASR requires mono, 16-bit PCM at 8 kHz or 16 kHz.
                format_info['baidu_compatible'] = (
                    format_info['channels'] == 1
                    and format_info['sample_width'] == 2
                    and format_info['framerate'] in (8000, 16000)
                )

                return format_info
        except Exception as e:
            logger.error(f"音频格式验证失败: {e}")
            return None

    @staticmethod
    def convert_to_baidu_format(audio_data: bytes) -> bytes:
        """Convert raw audio bytes to Baidu-compatible WAV (mono / 16-bit / 16 kHz).

        Best effort: returns the input unchanged when it is already compatible,
        when pydub is not installed, or when conversion fails.
        """
        input_path = None
        output_path = None
        try:
            # Stage the incoming bytes so wave/pydub can read them from disk.
            with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_input:
                temp_input.write(audio_data)
                temp_input.flush()
                input_path = temp_input.name

            format_info = EnhancedAudioUtils.validate_audio_format(input_path)
            if format_info and format_info.get('baidu_compatible'):
                logger.info("音频格式已兼容Baidu ASR")
                return audio_data

            try:
                from pydub import AudioSegment
            except ImportError:
                logger.warning("pydub未安装，无法进行音频格式转换")
                logger.warning("请安装: pip install pydub")
                return audio_data

            logger.info("正在转换音频格式为Baidu ASR兼容格式...")

            audio = AudioSegment.from_file(input_path)
            # Baidu ASR target: 16 kHz, mono, 16-bit samples.
            audio = audio.set_frame_rate(16000)
            audio = audio.set_channels(1)
            audio = audio.set_sample_width(2)

            with tempfile.NamedTemporaryFile(suffix='_converted.wav', delete=False) as temp_output:
                output_path = temp_output.name
            audio.export(output_path, format="wav")

            with open(output_path, 'rb') as f:
                converted_data = f.read()

            logger.info("音频格式转换成功")
            return converted_data

        except Exception as e:
            logger.error(f"音频格式转换失败: {e}")
            return audio_data
        finally:
            # Fix: temp files previously leaked when conversion raised mid-way.
            for path in (input_path, output_path):
                if path:
                    try:
                        os.unlink(path)
                    except OSError:
                        pass


class BaiduASR:
    """Wrapper around the Baidu speech-recognition (ASR) SDK."""

    def __init__(self, app_id: str = BAIDU_APP_ID, api_key: str = BAIDU_API_KEY,
                 secret_key: str = BAIDU_SECRET_KEY):
        """Store credentials and create the SDK client when configuration is usable."""
        self.app_id = app_id
        self.api_key = api_key
        self.secret_key = secret_key

        # Only build a client with real (non-placeholder) credentials.
        if AipSpeech and self.is_available():
            self.client = AipSpeech(app_id, api_key, secret_key)
            logger.info(f"初始化百度ASR，App ID: {self.app_id}")
        else:
            self.client = None
            logger.warning("百度ASR初始化失败")

    def is_available(self) -> bool:
        """True when the SDK is importable and credentials are not the placeholders."""
        return bool(AipSpeech and self.app_id and self.api_key and self.secret_key and
                    self.app_id != "your_app_id" and
                    self.api_key != "your_api_key" and
                    self.secret_key != "your_secret_key")

    async def transcribe(self, audio_path: str, dialect: str = DEFAULT_DIALECT) -> Dict[str, Any]:
        """Recognize speech in a WAV file via Baidu ASR.

        Returns a dict with transcription/confidence/dialect/language_code on
        success, or {"error": ...} on failure. Never raises to the caller.
        """
        if not self.is_available():
            logger.error("百度ASR配置不可用")
            return {"error": "百度ASR配置不可用"}

        try:
            logger.info(f"使用百度ASR进行语音识别: {audio_path}")

            # Map the requested dialect onto a Baidu recognition model id.
            dev_pid = BAIDU_DIALECT_MAPPING.get(dialect, 1536)

            with open(audio_path, "rb") as audio_file:
                audio_data = audio_file.read()

            # Fix: client.asr() is a blocking HTTP call; run it in a worker
            # thread so the asyncio event loop stays responsive.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(
                None,
                lambda: self.client.asr(
                    audio_data,
                    'wav',   # audio format
                    16000,   # sample rate
                    {
                        'dev_pid': dev_pid,         # recognition model
                        'cuid': str(uuid.uuid4()),  # unique caller id
                    },
                ),
            )

            logger.info(f"百度ASR响应: {result}")

            if result.get('err_no') == 0 and 'result' in result:
                transcription = ''.join(result['result'])
                logger.info(f"百度ASR识别成功: {transcription}")
                return {
                    "transcription": transcription,
                    "confidence": 0.8,  # Baidu reports no confidence; fixed default
                    "dialect": dialect,
                    "language_code": str(dev_pid)
                }

            error_msg = result.get('err_msg', '识别失败')
            logger.error(f"百度ASR错误: {error_msg}")
            return {"error": f"百度ASR错误: {error_msg}"}

        except Exception as e:
            logger.error(f"百度ASR异常: {str(e)}")
            return {"error": f"百度ASR处理异常: {str(e)}"}

class EnhancedBaiduASR(BaiduASR):
    """BaiduASR variant that validates and converts audio format before recognition."""

    # Human-readable explanations for common Baidu ASR error codes.
    _ERROR_MESSAGES = {
        3300: "输入错误，请检查音频格式",
        3301: "音频质量过差，请提供更清晰的音频",
        3302: "鉴权失败，请检查API密钥配置",
        3303: "语音服务器后端问题，请稍后重试",
        3307: "音频识别错误，可能是格式或质量问题",
        3308: "音频过长，请确保在60秒以内",
        3309: "音频数据为空",
        3310: "音频编码错误",
        3311: "采样率不支持，请使用8000或16000Hz",
    }

    async def transcribe(self, audio_path: str, dialect: str = DEFAULT_DIALECT) -> Dict[str, Any]:
        """Recognize speech, auto-converting the audio to Baidu's required format.

        Returns the same dict shape as BaiduASR.transcribe plus an
        "audio_format_converted" marker on success; {"error": ...} on failure.
        """
        if not self.is_available():
            logger.error("百度ASR配置不可用")
            return {"error": "百度ASR配置不可用"}

        processed_path = None
        try:
            logger.info(f"使用增强版百度ASR进行语音识别: {audio_path}")

            with open(audio_path, "rb") as f:
                original_audio = f.read()

            # Normalize to mono/16-bit/16kHz where possible (best effort).
            converted_audio = EnhancedAudioUtils.convert_to_baidu_format(original_audio)

            with tempfile.NamedTemporaryFile(suffix='_processed.wav', delete=False) as temp_file:
                temp_file.write(converted_audio)
                temp_file.flush()
                processed_path = temp_file.name

            # Log what we are actually sending; warn if still incompatible.
            format_info = EnhancedAudioUtils.validate_audio_format(processed_path)
            if format_info:
                logger.info(f"处理后音频格式: {format_info['channels']}声道, "
                            f"{format_info['sample_width'] * 8}位, {format_info['framerate']}Hz, "
                            f"{format_info['duration']:.2f}秒")
                if not format_info['baidu_compatible']:
                    logger.warning("音频格式仍不兼容，可能影响识别效果")

            dev_pid = BAIDU_DIALECT_MAPPING.get(dialect, 1536)

            with open(processed_path, "rb") as audio_file:
                audio_data = audio_file.read()

            # Guard against payloads Baidu will reject outright.
            file_size_mb = len(audio_data) / (1024 * 1024)
            if file_size_mb > 10:
                logger.error(f"音频文件过大: {file_size_mb:.2f}MB (最大10MB)")
                return {"error": f"音频文件过大: {file_size_mb:.2f}MB，最大支持10MB"}

            if len(audio_data) < 1024:
                logger.error("音频文件过小")
                return {"error": "音频文件过小，无法识别"}

            # Fix: run the blocking SDK call off the event loop.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(
                None,
                lambda: self.client.asr(
                    audio_data,
                    'wav',
                    16000,  # standard 16 kHz
                    {
                        'dev_pid': dev_pid,
                        'cuid': str(uuid.uuid4()),
                    },
                ),
            )

            logger.info(f"百度ASR响应: {result}")

            if result.get('err_no') == 0 and 'result' in result:
                transcription = ''.join(result['result'])
                logger.info(f"百度ASR识别成功: {transcription}")
                return {
                    "transcription": transcription,
                    "confidence": 0.8,
                    "dialect": dialect,
                    "language_code": str(dev_pid),
                    "audio_format_converted": True  # marks that conversion ran
                }

            err_no = result.get('err_no', 'unknown')
            err_msg = result.get('err_msg', '识别失败')
            error_detail = self._ERROR_MESSAGES.get(err_no, f"百度ASR错误 ({err_no}): {err_msg}")
            logger.error(error_detail)
            return {"error": error_detail}

        except Exception as e:
            logger.error(f"百度ASR异常: {str(e)}")
            return {"error": f"百度ASR处理异常: {str(e)}"}
        finally:
            # Fix: the processed temp file previously leaked on the size-guard
            # early returns only by luck, and always leaked when asr() raised.
            if processed_path:
                try:
                    os.unlink(processed_path)
                except OSError:
                    pass

class BaiduTTS:
    """Wrapper around the Baidu text-to-speech (TTS) SDK."""

    def __init__(self, app_id: str = BAIDU_APP_ID, api_key: str = BAIDU_API_KEY,
                 secret_key: str = BAIDU_SECRET_KEY):
        """Store credentials and create the SDK client when configuration is usable."""
        self.app_id = app_id
        self.api_key = api_key
        self.secret_key = secret_key

        if AipSpeech and self.is_available():
            self.client = AipSpeech(app_id, api_key, secret_key)
            logger.info(f"初始化百度TTS，App ID: {self.app_id}")
        else:
            self.client = None
            logger.warning("百度TTS初始化失败")

    def is_available(self) -> bool:
        """True when the SDK is importable and credentials are not the placeholders."""
        return bool(AipSpeech and self.app_id and self.api_key and self.secret_key and
                    self.app_id != "your_app_id" and
                    self.api_key != "your_api_key" and
                    self.secret_key != "your_secret_key")

    def synthesize(self, text: str, dialect: str = DEFAULT_DIALECT,
                   voice_id: Optional[str] = None, speed: float = 1.0) -> Dict[str, Any]:
        """Synthesize speech via Baidu TTS.

        Returns {"audio_base64": ..., "duration": ...} (mp3 payload) on success
        or {"error": ...} on failure. Never raises to the caller.
        """
        if not self.is_available():
            logger.error("百度TTS配置不可用")
            return {"error": "百度TTS配置不可用"}

        try:
            logger.info(f"使用百度TTS合成语音: '{text[:30]}...' (方言: {dialect})")

            # Voice selection maps to Baidu's 'per' parameter; 0 = default female.
            per = 0
            if voice_id:
                try:
                    per = int(voice_id)
                except (TypeError, ValueError):  # narrowed from a bare except
                    per = 0

            # Map caller speed (1.0 = normal) onto Baidu's 0-15 'spd' scale.
            spd = max(0, min(15, int(speed * 5)))

            result = self.client.synthesis(
                text,
                'zh',
                1,  # client type
                {
                    'vol': 5,    # volume
                    'per': per,  # speaker
                    'spd': spd,  # speed
                    'pit': 5,    # pitch
                    'aue': 3,    # audio encoding: 3 = mp3
                }
            )

            # The SDK returns raw audio bytes on success and a dict on error.
            if not isinstance(result, dict):
                audio_base64 = base64.b64encode(result).decode('utf-8')

                # Rough estimate (~0.5s per character); the API gives no metadata.
                estimated_duration = len(text) * 0.5

                logger.info(f"百度TTS合成成功，估算时长: {estimated_duration:.2f}秒")
                return {
                    "audio_base64": audio_base64,
                    "duration": estimated_duration
                }

            error_msg = result.get('err_msg', '合成失败')
            logger.error(f"百度TTS错误: {error_msg}")
            return {"error": f"百度TTS错误: {error_msg}"}

        except Exception as e:
            logger.error(f"百度TTS异常: {str(e)}")
            return {"error": f"百度TTS处理异常: {str(e)}"}


class FallbackTTS:
    """Last-resort TTS that emits a short beep instead of real speech."""

    def __init__(self):
        pass

    def is_available(self) -> bool:
        """The fallback is always usable."""
        return True

    def synthesize(self, text: str, dialect: str = DEFAULT_DIALECT,
                   voice_id: Optional[str] = None, speed: float = 1.0) -> Dict[str, Any]:
        """Return a one-second 440 Hz beep as base64 WAV.

        The text/dialect/voice/speed arguments are accepted for interface
        compatibility but do not influence the generated audio.
        """
        try:
            logger.warning(f"使用备用TTS合成语音: '{text[:30]}...'")

            rate = 16000
            seconds = 1.0

            # 440 Hz sine at 30% amplitude, rendered as 16-bit PCM samples.
            timeline = np.linspace(0, seconds, int(rate * seconds), False)
            beep = np.sin(440 * 2 * np.pi * timeline) * 0.3
            pcm = (beep * 32767).astype(np.int16)

            # Wrap the samples in a mono 16-bit WAV container, in memory.
            wav_buffer = io.BytesIO()
            with wave.open(wav_buffer, 'wb') as writer:
                writer.setnchannels(1)
                writer.setsampwidth(2)
                writer.setframerate(rate)
                writer.writeframes(pcm.tobytes())

            encoded = base64.b64encode(wav_buffer.getvalue()).decode('utf-8')

            logger.info("备用TTS完成")
            return {
                "audio_base64": encoded,
                "duration": seconds
            }

        except Exception as e:
            logger.error(f"备用TTS失败: {str(e)}")
            return {"error": f"备用TTS失败: {str(e)}"}


class DialectDetector:
    """Infer the most likely dialect by probing Baidu ASR models in turn."""

    def __init__(self, baidu_asr: Optional[BaiduASR] = None):
        # Reuse a shared ASR instance when given; otherwise build a private one.
        self.baidu_asr = baidu_asr or BaiduASR()

    async def detect_dialect(self, audio_path: str) -> List[DialectInfo]:
        """Score candidate dialects for an audio file.

        Returns DialectInfo entries sorted best-first; falls back to a
        Mandarin guess when ASR is unavailable or every probe fails.
        """
        try:
            logger.info(f"通过百度ASR检测方言: {audio_path}")

            if not self.baidu_asr.is_available():
                logger.warning("百度ASR不可用，使用模拟方言检测")
                return [
                    DialectInfo(dialect="mandarin", confidence=0.85),
                    DialectInfo(dialect="cantonese", confidence=0.10),
                    DialectInfo(dialect="english", confidence=0.05)
                ]

            candidates: List[DialectInfo] = []
            for candidate in ("mandarin", "cantonese", "english"):
                try:
                    outcome = await self.baidu_asr.transcribe(audio_path, candidate)
                    if "error" not in outcome:
                        text = outcome.get("transcription", "")
                        # Longer transcripts suggest a better model match;
                        # cap at 1.0 and damp by 0.8 as the confidence score.
                        score = min(len(text.strip()) / 50.0, 1.0) * 0.8
                        candidates.append(DialectInfo(dialect=candidate, confidence=score))

                    await asyncio.sleep(0.1)  # throttle successive API calls
                except Exception as e:
                    logger.warning(f"检测方言 {candidate} 失败: {str(e)}")
                    continue

            if not candidates:
                return [DialectInfo(dialect="mandarin", confidence=0.5)]
            return sorted(candidates, key=lambda info: info.confidence, reverse=True)

        except Exception as e:
            logger.error(f"方言检测异常: {str(e)}")
            return [DialectInfo(dialect="mandarin", confidence=1.0)]



class SpeechInteractionManager:
    """Facade tying together Baidu ASR/TTS, the fallback TTS and dialect detection."""

    def __init__(self):
        """Build the ASR/TTS backends, dialect detector and audio helpers."""
        self.baidu_asr = BaiduASR()

        # Baidu TTS is primary; the beep-generating fallback covers outages.
        self.baidu_tts = BaiduTTS()
        self.fallback_tts = FallbackTTS()

        # The detector probes dialects through the same ASR instance.
        self.dialect_detector = DialectDetector(self.baidu_asr)

        self.audio_utils = AudioUtils()

        logger.info("百度语音交互管理器初始化完成")

    async def process_speech_to_text(self, audio_base64: str,
                                     dialect: str = DEFAULT_DIALECT) -> SpeechRecognitionResponse:
        """Transcribe base64 audio; errors are reported in the response, never raised.

        Pass dialect="auto" to detect the dialect before recognition.
        """
        audio_path = None
        try:
            audio_bytes = self.audio_utils.decode_audio_base64(audio_base64)
            audio_path = self.audio_utils.save_temp_audio(audio_bytes)

            if dialect == "auto":
                dialect_results = await self.dialect_detector.detect_dialect(audio_path)
                dialect = dialect_results[0].dialect  # best-scoring candidate

            if self.baidu_asr.is_available():
                result = await self.baidu_asr.transcribe(audio_path, dialect)

                if "error" in result:
                    logger.error(f"百度ASR失败: {result['error']}")
                    return SpeechRecognitionResponse(
                        transcription="[语音识别失败]",
                        confidence=0.0,
                        dialect=dialect
                    )

                return SpeechRecognitionResponse(**result)

            logger.error("百度ASR系统不可用")
            return SpeechRecognitionResponse(
                transcription="[百度语音识别暂不可用]",
                confidence=0.0,
                dialect=dialect
            )

        except Exception as e:
            logger.error(f"语音转文字处理异常: {str(e)}")
            return SpeechRecognitionResponse(
                transcription=f"[错误: {str(e)}]",
                confidence=0.0,
                dialect=dialect
            )
        finally:
            # Fix: the temp file previously leaked when ASR was unavailable
            # or an exception occurred; always clean it up.
            if audio_path:
                try:
                    os.unlink(audio_path)
                except OSError:
                    pass

    async def process_text_to_speech(self, text: str, dialect: str = DEFAULT_DIALECT,
                                     voice_id: Optional[str] = None, speed: float = 1.0) -> SpeechSynthesisResponse:
        """Synthesize text, preferring Baidu TTS and falling back to the beep TTS."""
        try:
            if self.baidu_tts.is_available():
                result = self.baidu_tts.synthesize(text, dialect, voice_id, speed)
                if "error" not in result:
                    return SpeechSynthesisResponse(**result)
                logger.error(f"百度TTS失败: {result.get('error')}, 使用备用TTS")

            logger.warning("使用备用TTS系统")
            result = self.fallback_tts.synthesize(text, dialect, voice_id, speed)
            return SpeechSynthesisResponse(**result)

        except Exception as e:
            logger.error(f"文字转语音处理异常: {str(e)}")
            # Empty audio signals failure to the caller without raising.
            return SpeechSynthesisResponse(
                audio_base64="",
                duration=0.0
            )

    async def detect_dialect_from_audio(self, audio_base64: str) -> List[DialectInfo]:
        """Detect likely dialects from base64 audio; defaults to Mandarin on error."""
        audio_path = None
        try:
            audio_bytes = self.audio_utils.decode_audio_base64(audio_base64)
            audio_path = self.audio_utils.save_temp_audio(audio_bytes)
            return await self.dialect_detector.detect_dialect(audio_path)

        except Exception as e:
            logger.error(f"方言检测异常: {str(e)}")
            return [DialectInfo(dialect="mandarin", confidence=1.0)]
        finally:
            # Fix: always remove the temp file, even when detection raises.
            if audio_path:
                try:
                    os.unlink(audio_path)
                except OSError:
                    pass

    def get_available_voices(self, dialect: Optional[str] = None) -> Dict[str, List[str]]:
        """List available voice ids per backend, optionally filtered to one dialect."""
        voices = {}

        # Baidu voices, keyed as "baidu_<dialect>".
        if self.baidu_tts.is_available():
            for d, ids in BAIDU_VOICE_MAPPING.items():
                if dialect is None or d == dialect:
                    voices[f"baidu_{d}"] = [str(voice) for voice in ids]

        # The fallback voice is always present.
        voices["fallback"] = ["default"]

        return voices

class EnhancedSpeechInteractionManager(SpeechInteractionManager):
    """Speech interaction manager wired to the format-aware (enhanced) components."""

    def __init__(self):
        # Use the enhanced ASR (automatic audio-format validation/conversion).
        # NOTE(review): super().__init__() is deliberately not called, so the
        # plain BaiduASR/AudioUtils are never constructed — confirm this stays
        # in sync if the base __init__ gains new attributes.
        self.baidu_asr = EnhancedBaiduASR()

        # Remaining components are the same as the base manager.
        self.baidu_tts = BaiduTTS()
        self.fallback_tts = FallbackTTS()
        self.dialect_detector = DialectDetector(self.baidu_asr)
        self.audio_utils = EnhancedAudioUtils()

        logger.info("增强版百度语音交互管理器初始化完成")


# Module-level singleton used by the rest of the application.
#enhanced_speech_manager = EnhancedSpeechInteractionManager()

# Kept under the legacy name so existing code importing `speech_manager`
# keeps working after the Doubao -> Baidu migration.
speech_manager = EnhancedSpeechInteractionManager()


# 兼容性函数 - 保持与原有豆包接口一致
def create_baidu_speech_manager():
    """Factory kept for compatibility with the old Doubao-style interface."""
    manager = SpeechInteractionManager()
    return manager


if __name__ == "__main__":
    # Manual smoke test: run ASR and TTS once against a generated tone clip.
    async def test_baidu_speech():
        """Exercise speech-to-text and text-to-speech end to end."""
        import numpy as np
        import wave

        # Two seconds of a 440 Hz sine, rendered as 16-bit mono PCM.
        sample_rate = 16000
        duration = 2.0
        timeline = np.linspace(0, duration, int(sample_rate * duration), False)
        tone = np.sin(440 * 2 * np.pi * timeline) * 0.3
        samples = (tone * 32767).astype(np.int16)

        test_file = "test_baidu_audio.wav"
        with wave.open(test_file, 'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)
            wf.setframerate(sample_rate)
            wf.writeframes(samples.tobytes())

        manager = speech_manager

        # Feed the generated clip through speech recognition.
        with open(test_file, "rb") as f:
            audio_base64 = base64.b64encode(f.read()).decode('utf-8')

        result = await manager.process_speech_to_text(audio_base64, "mandarin")
        print(f"识别结果: {result}")

        # And synthesize a short sentence.
        tts_result = await manager.process_text_to_speech("这是百度语音合成测试", "mandarin")
        print(f"合成结果: 音频长度={len(tts_result.audio_base64)}, 时长={tts_result.duration}")

        # Best-effort cleanup of the generated clip.
        try:
            os.remove(test_file)
        except:
            pass


    asyncio.run(test_baidu_speech())