#!/usr/bin/env python3
"""
Real-time speech translation client using WebSocket protocol.

This module implements a client for real-time speech-to-speech translation service
using WebSocket connections. It supports audio streaming, protocol buffer
serialization, and handles bidirectional communication with the translation server.

Typical usage example:
    conf = Config(
        ws_url="wss://example.com/api/translate",
        app_key="your_app_key",
        access_key="your_access_key", 
        resource_id="your_resource_id"
    )
    await realtime_speech_translation(conf, "audio.wav", 1)

Attributes:
    Config: Configuration dataclass for WebSocket connection parameters.
    Audio: Audio metadata and binary data container.
    TranslateRequestData: Request data structure for translation requests.
    TranslateResponseData: Response data structure for translation responses.
"""

import asyncio
import uuid
import os
from pathlib import Path
from dataclasses import dataclass
import logging
from typing import Optional, List
import websockets
from websockets import Headers
import sys
import time
import json
from google.protobuf.json_format import MessageToDict
from websockets.legacy.exceptions import InvalidStatusCode

# Configure module path for protobuf imports
current_dir = os.path.dirname(os.path.abspath(__file__))
protogen_dir = os.path.join(current_dir, "python_protogen")
sys.path.append(protogen_dir)

# Import protobuf generated modules
from products.understanding.ast.ast_service_pb2 import TranslateRequest, ReqParams, TranslateResponse
from common.events_pb2 import Type

# Configuration
@dataclass
class Config:
    """Connection settings for the speech-translation WebSocket API.

    Holds the endpoint URL and the credential set required to open an
    authenticated WebSocket connection to the translation service.

    Attributes:
        ws_url: WebSocket endpoint URL of the translation service.
        app_key: Application key used for API authentication.
        access_key: Access key used for API authentication.
        resource_id: Identifier selecting the translation service type.
    """
    ws_url: str
    app_key: str
    access_key: str
    resource_id: str


@dataclass
class Audio:
    """Audio data container with metadata and binary content.

    Encapsulates audio format information and optional raw bytes for
    transmission over the WebSocket connection.

    Attributes:
        format: Audio format specification (e.g., "wav", "ogg_opus").
        rate: Audio sampling rate in Hz (e.g., 16000, 24000).
        bits: Bit depth for audio samples (optional).
        channel: Number of audio channels (optional).
        binary_data: Raw audio data as bytes (optional).
    """
    # Fix: defaults are None, so the annotations must be Optional
    # (previously `format: str = None` and `rate: int = None`).
    format: Optional[str] = None
    rate: Optional[int] = None
    bits: Optional[int] = None
    channel: Optional[int] = None
    binary_data: Optional[bytes] = None


@dataclass
class TranslateRequestData:
    """Outbound translation request for the WebSocket protocol.

    Bundles the session identifier, the protocol event name, and the
    optional audio/language parameters describing one message sent to
    the translation server.

    Attributes:
        session_id: Unique identifier for the translation session.
        event: Protocol event name ("Type_StartSession",
            "Type_TaskRequest", or "Type_FinishSession").
        source_audio: Metadata and/or raw bytes of the source audio (optional).
        target_audio: Desired output audio metadata (optional).
        mode: Translation mode, e.g. "s2s" for speech-to-speech (optional).
        source_language: Source language code, e.g. "zh" (optional).
        target_language: Target language code, e.g. "en" (optional).
    """
    session_id: str
    event: str
    source_audio: Optional[Audio] = None
    target_audio: Optional[Audio] = None
    mode: Optional[str] = None
    source_language: Optional[str] = None
    target_language: Optional[str] = None


@dataclass
class TranslateResponseData:
    """Parsed translation response received from the WebSocket server.

    Attributes:
        event: Response event type reported by the server.
        session_id: Session identifier associated with this response.
        sequence: Sequence number for ordering responses.
        text: Translated text content.
        data: Translated audio bytes.
        spk_chg: Speaker change indicator.
        message: Optional status or error message from the server.
    """
    event: str
    session_id: str
    sequence: int
    text: str
    data: bytes
    spk_chg: bool
    # Fix: default is None, so the annotation must be Optional[str]
    # (previously `message: str = None`).
    message: Optional[str] = None

async def main(audio_stream: str) -> None:
    """Demonstrate real-time speech translation with example configuration.

    Configures the ByteDance OpenSpeech API client for Chinese-to-English
    speech-to-speech translation, runs the translation, and logs the total
    execution time.

    Args:
        audio_stream: Path to the source audio file to translate.

    Raises:
        Exception: Propagates any exceptions from the translation process.
    """
    # SECURITY NOTE(review): credentials are hard-coded here for the demo;
    # move them to environment variables or a secrets store before sharing.
    conf = Config(
        ws_url="wss://openspeech.bytedance.com/api/v4/ast/v2/translate",
        app_key="3448079381",
        access_key="HShSuL8ulFuLrMU0d4W4gIbJUpwr34UO",
        resource_id="volc.service_type.10053"
    )

    # Use the monotonic perf_counter() for elapsed-time measurement;
    # time.time() can jump with wall-clock adjustments.
    start_time = time.perf_counter()
    # Await the coroutine directly: wrapping a single coroutine in
    # create_task() only to await it immediately adds no concurrency.
    await realtime_speech_translation(conf, audio_stream, 1)
    execution_time = time.perf_counter() - start_time

    # Log performance metrics
    logging.info(f"Translation completed in {execution_time:.3f} seconds")

async def realtime_speech_translation(conf: Config, audio_path: str, n: int, out_dir: str = "output"):
    """Execute real-time speech-to-speech translation over a WebSocket.

    Orchestrates the full translation pipeline:
    1. Reads and chunks the audio file.
    2. Establishes an authenticated WebSocket connection.
    3. Starts a translation session.
    4. Streams audio chunks to the server.
    5. Receives and accumulates translation responses.
    6. Saves the translated audio and text results.

    Args:
        conf: Configuration object with API credentials and connection details.
        audio_path: Path to the source audio file to translate.
        n: Sequence number used for output file naming.
        out_dir: Directory for saving translation results (default: "output").

    Returns:
        None. Results are written to files under ``out_dir``.

    Note:
        Uses 3200-byte chunks (approximately 100ms of audio) for streaming.
        Defaults to Chinese ("zh") to English ("en") translation. Errors are
        caught and logged; the function returns gracefully on failure.
    """
    # Read audio file into chunks for streaming
    try:
        audio_chunks = await read_audio_chunks(audio_path, 3200)  # 100ms chunks
        logging.info(f"Successfully read {len(audio_chunks)} chunks from {audio_path}")
    except Exception as e:
        logging.error(f"Failed to read audio chunks from file {audio_path}: {e}")
        return

    # Establish WebSocket connection to translation server
    try:
        conn_id = str(uuid.uuid4())
        headers = await build_http_headers(conf, conn_id)
        conn = await websockets.connect(
            conf.ws_url,
            additional_headers=headers,
            max_size=1000000000,  # 1GB max message size
            ping_interval=None    # Disable ping to prevent timeout issues
        )
        log_id = conn.response.headers.get('X-Tt-Logid')
        logging.info(f"Connected to server (log_id={log_id}, conn_id={conn_id}, task={n})")
    except Exception as e:
        logging.error(f"Connection failed: {e}")
        # Best-effort extraction of diagnostics from whatever exception shape
        # the websockets library raised.
        if hasattr(e, 'response') and hasattr(e.response, 'body'):
            logging.error(f"Response body: {e.response.body}")
        if hasattr(e, 'args') and len(e.args) > 0 and hasattr(e.args[0], 'headers'):
            logging.error(f"Response logid: {e.args[0].headers.get('X-Tt-Logid', 'unknown')}")
        return

    # Generate unique session identifier
    session_id = str(uuid.uuid4())

    # Create session initialization request
    start_request = TranslateRequestData(
        session_id=session_id,
        event="Type_StartSession",
        source_audio=Audio(format="wav", rate=16000, bits=16, channel=1),
        target_audio=Audio(format="ogg_opus", rate=24000),
        mode="s2s",
        source_language="zh",
        target_language="en"
    )

    # Send session start request and validate response
    try:
        await send_request(conn, start_request)
        resp = await receive_message(conn)

        # Validate that session started successfully
        if resp.event != Type.SessionStarted:
            logging.error(f"Session start failed (log_id={log_id})")
            logging.error(f"Unexpected response event: {resp.event}")
            logging.error(f"Response message: {resp.message}")
            await conn.close()
            return

        logging.info(f"Translation session started successfully (ID={session_id})")
    except Exception as e:
        logging.error(f"Failed to start translation session: {e}")
        await conn.close()
        return

    # Define audio streaming coroutine
    async def send_audio_chunks():
        """Stream audio chunks to the server with real-time pacing.

        Sends each chunk followed by a 100ms delay to simulate real-time
        streaming, then sends the session-finish request.

        Raises:
            Exception: For network or serialization errors during transmission.
        """
        try:
            # Stream each audio chunk with timing control
            for i, chunk in enumerate(audio_chunks):
                logging.info(f"Sending audio chunk {i+1}/{len(audio_chunks)}: {len(chunk)} bytes")
                chunk_request = TranslateRequestData(
                    session_id=session_id,
                    event="Type_TaskRequest",
                    source_audio=Audio(binary_data=chunk)
                )
                await send_request(conn, chunk_request)
                await asyncio.sleep(0.1)  # 100ms delay for real-time simulation

            # Send session completion request
            finish_request = TranslateRequestData(
                session_id=session_id,
                event="Type_FinishSession",
                source_audio=Audio()  # Empty audio to signal completion
            )
            await send_request(conn, finish_request)
            logging.info("Session completion request sent successfully")
        except Exception as e:
            logging.error(f"Error during audio chunk transmission: {e}")
            raise

    # Launch audio streaming task
    sender_task = asyncio.create_task(send_audio_chunks())

    # Initialize response collection buffers
    recv_audio = bytearray()  # Accumulated audio data
    recv_text = []  # Accumulated text translations

    # Process incoming translation responses
    try:
        while True:
            resp = await receive_message(conn)

            # Log response details for monitoring
            logging.info(
                f"Received translation response (event={resp.event}, session={resp.session_id}): "
                f"sequence={resp.sequence}, text_length={len(resp.text)}, "
                f"audio_length={len(resp.data)}, speaker_change={resp.spk_chg}"
            )

            # Handle error conditions
            if resp.event in (Type.SessionFailed, Type.SessionCanceled):
                logging.error(
                    f"Translation session failed (log_id={log_id}): "
                    f"event={resp.event}, message={resp.message}"
                )
                raise RuntimeError(f"Session failed: {resp.message}")

            # Check for session completion
            if resp.event == Type.SessionFinished:
                logging.info("Translation session completed successfully")
                break

            # Accumulate response data (skip usage statistics)
            if resp.event != Type.UsageResponse:
                recv_audio.extend(resp.data)
                recv_text.append(resp.text)

    except Exception as e:
        logging.error(f"Error during response processing: {e}")
    finally:
        # Bug fix: the original `await sender_task` would re-raise a sender
        # failure here and skip conn.close(), leaking the connection. Catch
        # the sender's exception so the WebSocket is always closed.
        try:
            await sender_task
        except Exception as e:
            logging.error(f"Audio sender task failed: {e}")
        finally:
            await conn.close()
            logging.info("WebSocket connection closed")

    # Save translation results to files
    if recv_audio:
        # Ensure output directory exists
        os.makedirs(out_dir, exist_ok=True)
        output_path = Path(out_dir) / f"translate_audio_{n:05}.opus"

        try:
            # Write translated audio data
            with open(output_path, 'wb') as f:
                f.write(recv_audio)

            # Combine all text translations
            combined_text = ' '.join(recv_text)

            logging.info(f"Translation completed successfully")
            logging.info(f"Audio saved to: {output_path}")
            logging.info(f"Translated text: {combined_text}")

        except Exception as e:
            logging.error(f"Failed to save translation results: {e}")
    else:
        logging.error("Translation completed but no audio data was received")

async def read_audio_chunks(audio_path: str, chunk_size: int) -> List[bytes]:
    """Read an audio file into fixed-size chunks for streaming.

    Splits the file into chunks of ``chunk_size`` bytes, suitable for
    streaming to the translation service in manageable segments. The final
    chunk may be shorter than ``chunk_size``.

    Args:
        audio_path: Path to the audio file to read.
        chunk_size: Size of each chunk in bytes.

    Returns:
        List of bytes objects, one per chunk (e.g. [b'<3200 bytes>', ...]).
        Empty list for an empty file.

    Raises:
        FileNotFoundError: If the specified audio file does not exist.
        OSError: If there are issues reading the file.
    """
    chunks: List[bytes] = []
    with open(audio_path, 'rb') as f:
        # Walrus idiom: read() returns b"" (falsy) at EOF, ending the loop.
        while chunk := f.read(chunk_size):
            chunks.append(chunk)
    return chunks

async def build_http_headers(conf: Config, conn_id: str) -> Headers:
    """Build HTTP headers for WebSocket connection authentication.

    (In essence, this constructs the special Volcano Engine HTTP request
    headers.) The headers carry the application key, access key, resource
    ID, and a per-connection identifier required by the translation API.

    Args:
        conf: Configuration object containing API credentials.
        conn_id: Unique connection identifier for this WebSocket session.

    Returns:
        Headers object containing authentication and identification headers.

    Example:
        headers = await build_http_headers(config, "conn-123-uuid")
        # Returns: Headers with X-Api-App-Key, X-Api-Access-Key, etc.
    """
    header_fields = {
        "X-Api-App-Key": conf.app_key,
        "X-Api-Access-Key": conf.access_key,
        "X-Api-Resource-Id": conf.resource_id,
        "X-Api-Connect-Id": conn_id,
    }
    return Headers(header_fields)

async def send_request(ws, request: TranslateRequestData):
    """Serialize a translation request to protobuf and send it over WebSocket.

    Converts a TranslateRequestData object to the protobuf wire format.
    Audio, mode, and language parameters set on the request are honoured;
    any field left unset falls back to the previous hard-coded defaults
    (wav/16k/16-bit/mono source, ogg_opus/24k target, s2s zh->en), so
    existing callers produce identical wire traffic.

    Args:
        ws: WebSocket connection object.
        request: TranslateRequestData object containing request information.

    Raises:
        websockets.exceptions.ConnectionClosed: If the connection is closed.
        Exception: For serialization or network errors.
    """
    # Create protobuf request object
    request_data = TranslateRequest()
    request_data.request_meta.SessionID = request.session_id

    # Map string event types to protobuf enum values (unknown names leave
    # the event field at its protobuf default, as before).
    event_map = {
        "Type_StartSession": Type.StartSession,
        "Type_TaskRequest": Type.TaskRequest,
        "Type_FinishSession": Type.FinishSession,
    }
    if request.event in event_map:
        request_data.event = event_map[request.event]

    # Set user identification
    request_data.user.uid = "ast_py_client"
    request_data.user.did = "ast_py_client"

    # Source audio parameters: use values from the request when present.
    # Bug fix: guard against request.source_audio being None (the dataclass
    # default), which previously raised AttributeError on binary_data access.
    source = request.source_audio
    request_data.source_audio.format = source.format if source and source.format else "wav"
    request_data.source_audio.rate = source.rate if source and source.rate else 16000
    request_data.source_audio.bits = source.bits if source and source.bits else 16
    request_data.source_audio.channel = source.channel if source and source.channel else 1

    # Include binary audio data if provided
    if source and source.binary_data:
        request_data.source_audio.binary_data = source.binary_data

    # Target audio parameters (default OGG Opus at 24 kHz)
    target = request.target_audio
    request_data.target_audio.format = target.format if target and target.format else "ogg_opus"
    request_data.target_audio.rate = target.rate if target and target.rate else 24000

    # Translation parameters, with previous defaults as fallback
    request_data.request.mode = request.mode or "s2s"  # Speech-to-speech mode
    request_data.request.source_language = request.source_language or "zh"
    request_data.request.target_language = request.target_language or "en"

    # Serialize and send over WebSocket
    await ws.send(request_data.SerializeToString())

async def receive_message(ws) -> TranslateResponseData:
    """Receive one protobuf message from the server and parse it.

    Deserializes the raw WebSocket frame into a TranslateResponse and
    repackages it as a TranslateResponseData. Usage-statistics responses
    get their full payload rendered as pretty-printed JSON in the text
    field for readability.

    Args:
        ws: WebSocket connection object.

    Returns:
        TranslateResponseData with the parsed response fields.

    Raises:
        websockets.exceptions.ConnectionClosed: If the connection is closed.
        google.protobuf.message.DecodeError: If deserialization fails.
    """
    raw_frame = await ws.recv()

    parsed = TranslateResponse()
    parsed.ParseFromString(raw_frame)

    # Usage responses carry structured stats; dump the whole message as
    # JSON instead of using the (empty) text field.
    if parsed.event == Type.UsageResponse:
        response_text = json.dumps(MessageToDict(parsed), indent=2, ensure_ascii=False)
    else:
        response_text = parsed.text

    meta = parsed.response_meta
    return TranslateResponseData(
        event=parsed.event,
        session_id=meta.SessionID,
        sequence=meta.Sequence,
        text=response_text,
        data=parsed.data,
        spk_chg=parsed.spk_chg,
        message=meta.Message,
    )


if __name__ == "__main__":
    # Entry point for the speech translation client: configure INFO-level
    # logging for operational monitoring, then run the async demo against
    # the bundled sample audio file.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    )
    asyncio.run(main("test_audio.wav"))