# speech_interaction.py - Hybrid Speech Interaction Module (DeepSeek STT + Xunfei TTS)
import io
import os
import logging
import tempfile
import json
import base64
import asyncio
import requests
import numpy as np
import time
import hashlib
import hmac
import uuid
import wave
from typing import Dict, Any, List, Optional, Union, Tuple
from pydantic import BaseModel
from urllib.parse import urlencode
from datetime import datetime
from dotenv import load_dotenv

# Load environment variables from a local .env file, if one exists
load_dotenv()
# DeepSeek API credentials
# SECURITY NOTE(review): the hard-coded fallback credentials below should be
# removed and supplied exclusively via environment variables / a secrets store.
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY", "sk-c615331ede0b45848a3f42b2e93b4a0a")
DEEPSEEK_API_URL = os.getenv("DEEPSEEK_API_URL", "http://10.2.8.77:3000")

# Xunfei API credentials (same security note as above applies)
XUNFEI_APPID = os.getenv("XUNFEI_APPID", "ad9b50b0")
XUNFEI_API_KEY = os.getenv("XUNFEI_API_KEY", "36f812b6d46230339aec1534edf11e8e")
XUNFEI_API_SECRET = os.getenv("XUNFEI_API_SECRET", "Yjk3MDJlZmExMjNiNWRlNWNjZTk1ZjFl")
# Hostname of the Xunfei TTS endpoint
XUNFEI_TTS_HOST = "tts-api.xfyun.cn"

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Constants
SUPPORTED_DIALECTS = ["mandarin", "cantonese", "sichuan", "standard"]
DEFAULT_DIALECT = "mandarin"
AUDIO_SAMPLE_RATE = 16000  # Hz
MAX_AUDIO_LENGTH = 60  # seconds

# Dialect mapping for DeepSeek API (dialect name -> language code)
DEEPSEEK_DIALECT_MAPPING = {
    "mandarin": "zh",  # Mandarin
    "cantonese": "zh-yue",  # Cantonese
    "sichuan": "zh-sc",  # Sichuan dialect
    "standard": "zh"  # Standard Chinese
}

# Dialect mapping for Xunfei API (separate identifiers for ASR and TTS)
XUNFEI_DIALECT_MAPPING = {
    "mandarin": {"asr": "", "tts": "mandarin"},  # Mandarin
    "cantonese": {"asr": "cantonese", "tts": "cantonese"},  # Cantonese
    "sichuan": {"asr": "sichuan", "tts": "sichuan"},  # Sichuan dialect
    "standard": {"asr": "", "tts": "mandarin"}  # Standard Chinese
}

# Voice mapping for Xunfei TTS (dialect -> available voice ids, first is default)
XUNFEI_VOICE_MAPPING = {
    "mandarin": [
        "xiaoyan", "aisjiuxu", "aisxping", "aisjinger", "aisbabyxu"
    ],
    "cantonese": [
        "aisxgangna", "aisxhong"
    ],
    "sichuan": [
        "aiscdanqi", "aiscmeixia"
    ]
}


# Models for request/response
class SpeechRecognitionRequest(BaseModel):
    """Request body for speech-to-text: base64 audio plus an optional dialect hint."""
    audio_base64: str  # base64-encoded audio payload
    dialect: Optional[str] = DEFAULT_DIALECT  # one of SUPPORTED_DIALECTS; "auto" triggers detection


class SpeechSynthesisRequest(BaseModel):
    """Request body for text-to-speech."""
    text: str  # text to synthesize
    dialect: Optional[str] = DEFAULT_DIALECT  # one of SUPPORTED_DIALECTS
    voice_id: Optional[str] = None  # explicit Xunfei voice id; auto-picked from dialect when None
    speed: Optional[float] = 1.0  # 1.0 = normal; scaled to the engine's 0-100 range


class DialectDetectionRequest(BaseModel):
    """Request body for dialect detection from raw audio."""
    audio_base64: str  # base64-encoded audio payload


class SpeechRecognitionResponse(BaseModel):
    """Result of speech-to-text."""
    transcription: str  # recognized text (bracketed error marker on failure)
    confidence: Optional[float] = None  # 0.0 on failure; backend supplies a fixed default otherwise
    dialect: Optional[str] = None  # dialect actually used for recognition
    language_code: Optional[str] = None  # language code sent to the ASR backend


class SpeechSynthesisResponse(BaseModel):
    """Result of text-to-speech."""
    audio_base64: str  # base64-encoded audio (empty string on total failure)
    duration: Optional[float] = None  # audio length in seconds, if known


class DialectInfo(BaseModel):
    """A single dialect hypothesis with its confidence score."""
    dialect: str  # dialect identifier, e.g. "mandarin"
    confidence: float  # relative confidence in [0, 1]


class AudioUtils:
    """Helpers for base64 audio transport and temporary audio files."""

    @staticmethod
    def decode_audio_base64(audio_base64: str) -> bytes:
        """Decode a base64 string into raw audio bytes (raises ValueError if invalid)."""
        try:
            return base64.b64decode(audio_base64)
        except Exception as exc:
            logger.error(f"Error decoding audio: {str(exc)}")
            raise ValueError(f"Invalid audio data: {str(exc)}")

    @staticmethod
    def encode_audio_base64(audio_bytes: bytes) -> str:
        """Encode raw audio bytes as a base64 string (raises ValueError on failure)."""
        try:
            return base64.b64encode(audio_bytes).decode('utf-8')
        except Exception as exc:
            logger.error(f"Error encoding audio: {str(exc)}")
            raise ValueError(f"Invalid audio data: {str(exc)}")

    @staticmethod
    def save_temp_audio(audio_data: bytes, file_ext: str = ".wav") -> str:
        """Write audio bytes to a temp file and return its path (caller deletes it)."""
        try:
            with tempfile.NamedTemporaryFile(suffix=file_ext, delete=False) as tmp:
                tmp.write(audio_data)
                return tmp.name
        except Exception as exc:
            logger.error(f"Error saving temporary audio: {str(exc)}")
            raise ValueError(f"Failed to save audio data: {str(exc)}")

    @staticmethod
    def get_wav_duration(wav_file: str) -> float:
        """Return the duration of a WAV file in seconds, or 0.0 if it can't be read."""
        try:
            with wave.open(wav_file, 'rb') as wav:
                return wav.getnframes() / float(wav.getframerate())
        except Exception as exc:
            logger.error(f"Error getting WAV duration: {str(exc)}")
            return 0.0


class DeepSeekASR:
    """Speech recognition using DeepSeek ASR API."""

    # Request timeout (seconds) so an unresponsive server cannot hang the caller.
    REQUEST_TIMEOUT = 60

    def __init__(self, api_key: str = DEEPSEEK_API_KEY, api_url: str = DEEPSEEK_API_URL):
        """
        Args:
            api_key: Bearer token for the DeepSeek API.
            api_url: Base URL of the DeepSeek service.
        """
        self.api_key = api_key
        self.api_url = api_url
        self.endpoint = f"{api_url}/v1/audio/speech_recognition"

    def is_available(self) -> bool:
        """Check if DeepSeek ASR API is available (both key and URL configured)."""
        return bool(self.api_key) and bool(self.api_url)

    async def transcribe(self, audio_path: str, dialect: str = DEFAULT_DIALECT) -> Dict[str, Any]:
        """Transcribe an audio file using the DeepSeek ASR API.

        Args:
            audio_path: Path to the audio file on disk.
            dialect: One of SUPPORTED_DIALECTS; mapped to a language code.

        Returns:
            On success: dict with transcription, confidence, dialect and
            language_code. On failure: dict with a single "error" key.
        """
        if not self.is_available():
            logger.error("DeepSeek ASR API key not available")
            return {"error": "ASR API not available"}

        try:
            logger.info(f"Transcribing audio with DeepSeek ASR: {audio_path}")

            # Map dialect to language code; unknown dialects fall back to "zh".
            language = DEEPSEEK_DIALECT_MAPPING.get(dialect, "zh")

            # Read the audio file and encode it for JSON transport.
            with open(audio_path, "rb") as audio_file:
                audio_base64 = base64.b64encode(audio_file.read()).decode('utf-8')

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            }

            payload = {
                "model": "deepseek-asr",
                "audio": audio_base64,
                "language": language,
                "response_format": "text"
            }

            # NOTE(review): this synchronous call blocks the event loop while the
            # request is in flight; consider asyncio.to_thread if that matters.
            # The timeout prevents a stalled server from hanging this coroutine.
            response = requests.post(
                self.endpoint,
                headers=headers,
                json=payload,
                timeout=self.REQUEST_TIMEOUT
            )

            if response.status_code != 200:
                logger.error(f"DeepSeek ASR API error: {response.status_code} - {response.text}")
                return {"error": f"DeepSeek ASR API error: {response.status_code}"}

            result = response.json()
            transcription = result.get("text", "")

            # DeepSeek API doesn't provide confidence, so we use a default
            default_confidence = 0.9

            logger.info(f"DeepSeek ASR transcription completed: {transcription}")
            return {
                "transcription": transcription,
                "confidence": default_confidence,
                "dialect": dialect,
                "language_code": language
            }

        except Exception as e:
            logger.error(f"Error during DeepSeek ASR transcription: {str(e)}")
            return {"error": f"DeepSeek ASR transcription failed: {str(e)}"}


class XunfeiTTS:
    """Text-to-speech using iFLYTEK (Xunfei) TTS API."""

    # Request timeout (seconds) so a stalled server cannot hang the caller.
    REQUEST_TIMEOUT = 60
    # "auf": "audio/L16;rate=16000" means 16 kHz, 16-bit (2 bytes), mono PCM.
    PCM_BYTES_PER_SECOND = 16000 * 2

    def __init__(self, app_id: str = XUNFEI_APPID, api_key: str = XUNFEI_API_KEY, api_secret: str = XUNFEI_API_SECRET):
        """
        Args:
            app_id: Xunfei application id.
            api_key: Xunfei API key.
            api_secret: Xunfei API secret.
        """
        self.app_id = app_id
        self.api_key = api_key
        self.api_secret = api_secret
        self.host = XUNFEI_TTS_HOST
        self.path = "/v2/tts"
        self.url = f"https://{self.host}{self.path}"
        self.dialect_voices = XUNFEI_VOICE_MAPPING

    def is_available(self) -> bool:
        """Check if Xunfei TTS is available (all three credentials configured)."""
        return bool(self.app_id and self.api_key and self.api_secret)

    def _create_auth_params(self) -> Dict[str, str]:
        """Create authentication query parameters for the TTS API.

        NOTE(review): md5(api_key + timestamp + api_secret) is the legacy
        signature scheme; confirm it matches the version of the endpoint
        actually deployed (the /v2 API commonly uses an HMAC handshake).
        """
        # Current unix timestamp in seconds
        now = int(time.time())

        # Signature over key + timestamp + secret
        signature_origin = f"{self.api_key}{now}{self.api_secret}"
        signature_sha = hashlib.md5(signature_origin.encode('utf-8')).hexdigest()

        return {
            'app_id': self.app_id,
            'api_key': self.api_key,
            'timestamp': str(now),
            'signature': signature_sha
        }

    def synthesize(self, text: str, dialect: str = DEFAULT_DIALECT,
                   voice_id: Optional[str] = None, speed: float = 1.0) -> Dict[str, Any]:
        """Synthesize speech from text using Xunfei TTS.

        Args:
            text: Text to synthesize (UTF-8).
            dialect: One of SUPPORTED_DIALECTS; selects the default voice set.
            voice_id: Explicit voice id; first voice of the dialect when None.
            speed: 1.0 maps to the engine default (50 on its 0-100 scale).

        Returns:
            {"audio_base64": ..., "duration": ...} on success,
            {"error": ...} on failure.
        """
        if not self.is_available():
            logger.error("Xunfei TTS credentials not available")
            return {"error": "TTS API not available"}

        try:
            logger.info(f"Synthesizing speech with Xunfei TTS: '{text[:30]}...' in {dialect} dialect")

            # Select voice based on dialect if not specified.
            if not voice_id:
                available_voices = self.dialect_voices.get(dialect, self.dialect_voices["mandarin"])
                voice_id = available_voices[0]  # Default to first voice

            auth_params = self._create_auth_params()

            tts_params = {
                "common": {
                    "app_id": self.app_id
                },
                "business": {
                    "aue": "raw",  # Audio format: raw PCM (headerless)
                    "sfl": 1,  # Enable streaming
                    "auf": "audio/L16;rate=16000",  # 16-bit linear PCM @ 16 kHz
                    "vcn": voice_id,  # Voice
                    "speed": int(speed * 50),  # Speed, 0-100, default 50
                    "volume": 50,  # Volume, 0-100
                    "pitch": 50,  # Pitch, 0-100
                    "bgs": 0,  # Background sound, 0=disable
                    "tte": "UTF8"  # Text encoding
                },
                "data": {
                    "text": base64.b64encode(text.encode('utf-8')).decode('utf-8'),
                    "status": 2  # 2=final
                }
            }

            headers = {
                'Content-Type': 'application/json',
                'Host': self.host,
                'Connection': 'keep-alive'
            }

            response = requests.post(
                self.url,
                params=auth_params,
                json=tts_params,
                headers=headers,
                timeout=self.REQUEST_TIMEOUT  # fail fast instead of hanging forever
            )

            if response.status_code != 200:
                logger.error(f"TTS API error: {response.text}")
                return {"error": f"TTS API error: {response.status_code}"}

            result = response.json()
            if result.get("code") != 0:
                logger.error(f"TTS API error: {result}")
                return {"error": f"TTS API error: {result.get('message', 'Unknown error')}"}

            # Extract audio data
            audio_data = base64.b64decode(result["data"]["audio"])

            # Bug fix: "aue": "raw" returns headerless PCM, so probing it with
            # the wave module always failed and yielded duration 0.0. Compute
            # the duration directly from the byte count instead.
            duration = len(audio_data) / self.PCM_BYTES_PER_SECOND

            audio_base64 = base64.b64encode(audio_data).decode('utf-8')

            logger.info(f"Xunfei TTS speech synthesis completed, duration: {duration:.2f}s")
            return {
                "audio_base64": audio_base64,
                "duration": duration
            }

        except Exception as e:
            logger.error(f"Error during Xunfei TTS speech synthesis: {str(e)}")
            return {"error": f"Xunfei TTS speech synthesis failed: {str(e)}"}

    def save_temp_audio(self, audio_data: bytes) -> str:
        """Write audio bytes to a temporary .wav file and return its path.

        Kept for backward compatibility; the caller must delete the file.
        """
        temp_file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        temp_file.write(audio_data)
        temp_file.close()
        return temp_file.name


class FallbackTTS:
    """Last-resort TTS that emits a short beep when no real engine is reachable."""

    def __init__(self):
        pass

    def is_available(self) -> bool:
        """This engine needs no credentials, so it is always available."""
        return True

    def synthesize(self, text: str, dialect: str = DEFAULT_DIALECT,
                   voice_id: Optional[str] = None, speed: float = 1.0) -> Dict[str, Any]:
        """Produce a one-second 440 Hz beep; dialect/voice/speed are ignored."""
        try:
            logger.warning(f"Using fallback TTS for text: '{text[:30]}...'")

            sample_rate = 16000
            duration = 1.0  # seconds

            # 440 Hz sine wave at 30% amplitude across the whole clip.
            timeline = np.linspace(0, duration, int(sample_rate * duration), False)
            waveform = np.sin(440 * 2 * np.pi * timeline) * 0.3

            # Scale the float samples into signed 16-bit PCM.
            pcm_samples = (waveform * 32767).astype(np.int16)

            # Wrap the PCM frames in a mono 16-bit WAV container, in memory.
            wav_buffer = io.BytesIO()
            with wave.open(wav_buffer, 'wb') as wav_writer:
                wav_writer.setnchannels(1)
                wav_writer.setsampwidth(2)
                wav_writer.setframerate(sample_rate)
                wav_writer.writeframes(pcm_samples.tobytes())

            wav_buffer.seek(0)
            audio_base64 = base64.b64encode(wav_buffer.read()).decode('utf-8')

            logger.info("Fallback TTS completed with beep sound")
            return {
                "audio_base64": audio_base64,
                "duration": duration
            }

        except Exception as e:
            logger.error(f"Error in fallback TTS: {str(e)}")
            return {"error": f"Fallback TTS failed: {str(e)}"}


class DialectDetector:
    """Detect the spoken Chinese dialect of an audio clip.

    Transcribes with DeepSeek ASR and scores the text with simple
    character-frequency heuristics; falls back to fixed simulated scores
    when no ASR backend is available.
    """

    def __init__(self, deepseek_asr: Optional[DeepSeekASR] = None):
        """
        Args:
            deepseek_asr: ASR backend used for transcription; a fresh
                DeepSeekASR is created when omitted.
        """
        self.deepseek_asr = deepseek_asr or DeepSeekASR()

    async def detect_dialect(self, audio_path: str) -> List[DialectInfo]:
        """Detect dialect from an audio file.

        Args:
            audio_path: Path to the audio file on disk.

        Returns:
            DialectInfo list sorted by confidence (highest first). On any
            error, a single "standard" entry with confidence 1.0.
        """
        try:
            logger.info(f"Detecting dialect from audio: {audio_path}")

            if self.deepseek_asr.is_available():
                # Transcribe once with the default dialect (mandarin) and score
                # the text heuristically — DeepSeek does not expose per-dialect
                # confidence scores directly.
                result = await self.deepseek_asr.transcribe(audio_path, "mandarin")
                transcription = result.get("transcription", "")

                # Heuristic priors, adjusted below by marker characters.
                # This is a simplified approach - in production you'd use more
                # sophisticated methods.
                cantonese_score = 0.1
                sichuan_score = 0.1
                mandarin_score = 0.8

                # Cantonese often uses these characters more frequently
                cantonese_chars = ["嘅", "啲", "喺", "唔", "咁", "咩", "嘢", "佢"]
                for char in cantonese_chars:
                    if char in transcription:
                        cantonese_score += 0.1
                        mandarin_score -= 0.05

                # Sichuan dialect markers (single characters and phrases)
                sichuan_chars = ["哈", "咯", "嘛", "安", "得", "哦"]
                sichuan_patterns = ["得很", "安逸", "要得"]

                for char in sichuan_chars:
                    if char in transcription:
                        sichuan_score += 0.05
                        mandarin_score -= 0.02

                for pattern in sichuan_patterns:
                    if pattern in transcription:
                        sichuan_score += 0.1
                        mandarin_score -= 0.05

                # Normalize so the three scores sum to 1
                total = cantonese_score + sichuan_score + mandarin_score
                cantonese_score /= total
                sichuan_score /= total
                mandarin_score /= total

                results = [
                    DialectInfo(dialect="mandarin", confidence=mandarin_score),
                    DialectInfo(dialect="cantonese", confidence=cantonese_score),
                    DialectInfo(dialect="sichuan", confidence=sichuan_score)
                ]

                # Most likely dialect first
                results.sort(key=lambda x: x.confidence, reverse=True)
                logger.info(f"Dialect detection completed: {results[0].dialect} ({results[0].confidence:.2f})")
                return results
            else:
                # Fallback: simple simulation
                logger.warning("Using simulated dialect detection (DeepSeek ASR not available)")

                # Simulated dialect detection results
                results = [
                    DialectInfo(dialect="mandarin", confidence=0.85),
                    DialectInfo(dialect="cantonese", confidence=0.10),
                    DialectInfo(dialect="sichuan", confidence=0.05)
                ]

                logger.info(f"Simulated dialect detection: {results[0].dialect} ({results[0].confidence:.2f})")
                return results

        except Exception as e:
            logger.error(f"Error during dialect detection: {str(e)}")
            return [DialectInfo(dialect="standard", confidence=1.0)]  # Fallback to standard


class SpeechInteractionManager:
    """Facade wiring DeepSeek ASR, Xunfei TTS, dialect detection and audio utils."""

    def __init__(self):
        # Initialize ASR systems - Primary: DeepSeek
        self.deepseek_asr = DeepSeekASR()

        # Initialize TTS systems - Primary: Xunfei, with a beep-only fallback
        self.xunfei_tts = XunfeiTTS()
        self.fallback_tts = FallbackTTS()

        # Dialect detector reuses the ASR backend
        self.dialect_detector = DialectDetector(self.deepseek_asr)

        # Audio utilities
        self.audio_utils = AudioUtils()

        logger.info("Speech Interaction Manager initialized with DeepSeek ASR and Xunfei TTS")

    async def process_speech_to_text(self, audio_base64: str,
                                     dialect: str = DEFAULT_DIALECT) -> SpeechRecognitionResponse:
        """Transcribe base64-encoded audio with DeepSeek ASR.

        Args:
            audio_base64: base64-encoded audio payload.
            dialect: dialect hint; "auto" runs dialect detection first.

        Returns:
            SpeechRecognitionResponse; on failure the transcription field
            carries a bracketed error marker and confidence is 0.0.
        """
        try:
            audio_bytes = self.audio_utils.decode_audio_base64(audio_base64)
            audio_path = self.audio_utils.save_temp_audio(audio_bytes)
            try:
                # Resolve "auto" to the most likely detected dialect
                if dialect == "auto":
                    dialect_results = await self.dialect_detector.detect_dialect(audio_path)
                    dialect = dialect_results[0].dialect

                if self.deepseek_asr.is_available():
                    result = await self.deepseek_asr.transcribe(audio_path, dialect)

                    if "error" in result:
                        logger.error(f"DeepSeek ASR failed: {result['error']}")
                        return SpeechRecognitionResponse(
                            transcription="[语音识别失败]",
                            confidence=0.0,
                            dialect=dialect
                        )

                    return SpeechRecognitionResponse(**result)

                # If ASR system is not available
                logger.error("No ASR system available")
                return SpeechRecognitionResponse(
                    transcription="[语音识别暂不可用]",
                    confidence=0.0,
                    dialect=dialect
                )
            finally:
                # Bug fix: previously the temp file leaked when ASR was
                # unavailable or transcription raised; always clean it up.
                try:
                    os.unlink(audio_path)
                except OSError:
                    pass

        except Exception as e:
            logger.error(f"Error processing speech to text: {str(e)}")
            return SpeechRecognitionResponse(
                transcription=f"[错误: {str(e)}]",
                confidence=0.0,
                dialect=dialect
            )

    async def process_text_to_speech(self, text: str, dialect: str = DEFAULT_DIALECT,
                                     voice_id: Optional[str] = None, speed: float = 1.0) -> SpeechSynthesisResponse:
        """Synthesize speech with Xunfei TTS, degrading to the beep fallback.

        Args:
            text: text to synthesize.
            dialect: one of SUPPORTED_DIALECTS.
            voice_id: explicit voice id, or None for the dialect default.
            speed: 1.0 = normal speed.

        Returns:
            SpeechSynthesisResponse; empty audio with duration 0.0 if even
            the fallback fails.
        """
        try:
            # Use Xunfei TTS (primary)
            if self.xunfei_tts.is_available():
                result = self.xunfei_tts.synthesize(text, dialect, voice_id, speed)
                if "error" not in result:
                    return SpeechSynthesisResponse(**result)
                logger.error(f"Xunfei TTS failed: {result.get('error')}, falling back")

            # Use fallback TTS as last resort
            logger.warning("Using fallback TTS system")
            result = self.fallback_tts.synthesize(text, dialect, voice_id, speed)
            return SpeechSynthesisResponse(**result)

        except Exception as e:
            logger.error(f"Error processing text to speech: {str(e)}")
            # Degrade to an empty clip rather than raising to the caller
            return SpeechSynthesisResponse(
                audio_base64="",
                duration=0.0
            )

    async def detect_dialect_from_audio(self, audio_base64: str) -> List[DialectInfo]:
        """Detect the dialect of base64-encoded audio.

        Returns:
            DialectInfo list sorted by confidence; a single "standard"
            entry with confidence 1.0 on error.
        """
        try:
            audio_bytes = self.audio_utils.decode_audio_base64(audio_base64)
            audio_path = self.audio_utils.save_temp_audio(audio_bytes)
            try:
                return await self.dialect_detector.detect_dialect(audio_path)
            finally:
                # Always remove the temp file, even if detection raised
                try:
                    os.unlink(audio_path)
                except OSError:
                    pass

        except Exception as e:
            logger.error(f"Error detecting dialect: {str(e)}")
            return [DialectInfo(dialect="standard", confidence=1.0)]  # Fallback to standard

    def get_available_voices(self, dialect: Optional[str] = None) -> Dict[str, List[str]]:
        """List available voice ids per dialect.

        Args:
            dialect: restrict to one dialect; None returns all dialects.

        Returns:
            Mapping like {"xunfei_mandarin": [...], "fallback": ["default"]}.
        """
        voices = {}

        # Get Xunfei voices
        if self.xunfei_tts.is_available():
            for d, v in self.xunfei_tts.dialect_voices.items():
                if dialect is None or d == dialect:
                    voices[f"xunfei_{d}"] = v

        # Fallback engine exposes a single synthetic voice
        voices["fallback"] = ["default"]

        return voices


# Create singleton instance shared by all importers of this module
speech_manager = SpeechInteractionManager()
