import json
import numpy as np
import tritonclient.http as httpclient
import argparse
import wave
import time
import os
from typing import List, Dict, Any

class WenetTritonClient:
    """Client for a WeNet attention-rescoring ASR pipeline served by Triton.

    Sends a whole utterance (raw float32 waveform) to the
    ``attention_rescoring`` ensemble model over Triton's HTTP API and
    returns the decoded transcript.
    """

    # Fallback rate used for duration reporting until a file has been read.
    DEFAULT_SAMPLE_RATE = 16000

    def __init__(self, url: str = "localhost:8000"):
        """Create an HTTP inference client connected to *url*."""
        self.client = httpclient.InferenceServerClient(url=url)
        # Updated by preprocess_audio() with the actual file's frame rate.
        self.sample_rate = self.DEFAULT_SAMPLE_RATE

    def preprocess_audio(self, wav_path: str) -> tuple:
        """Read a WAV file and return ``(samples, length)``.

        ``samples`` is a mono float32 array normalized to [-1, 1];
        ``length`` is a 1-element int32 array holding the sample count
        (the shape the Triton inputs expect downstream).

        Raises:
            FileNotFoundError: if *wav_path* does not exist.
            ValueError: for unsupported sample widths (e.g. 24-bit PCM).
        """
        if not os.path.exists(wav_path):
            raise FileNotFoundError(f"Audio file not found: {wav_path}")

        with wave.open(wav_path, 'rb') as wav_file:
            sample_width = wav_file.getsampwidth()
            num_channels = wav_file.getnchannels()
            frame_rate = wav_file.getframerate()
            num_frames = wav_file.getnframes()

            print(f"Audio info: {num_channels} channels, {sample_width} bytes/sample, "
                  f"{frame_rate} Hz, {num_frames} frames")

            audio_data = wav_file.readframes(num_frames)

        # Remember the real rate so later duration reports don't assume 16 kHz.
        self.sample_rate = frame_rate

        # Convert PCM bytes to float32 normalized to [-1, 1].
        if sample_width == 2:
            audio_array = np.frombuffer(audio_data, dtype=np.int16).astype(np.float32) / 32768.0
        elif sample_width == 4:
            audio_array = np.frombuffer(audio_data, dtype=np.int32).astype(np.float32) / 2147483648.0
        elif sample_width == 1:
            # 8-bit WAV PCM is *unsigned*, centered at 128 — decoding it as
            # signed int8 (as a naive fallback would) inverts/garbles the wave.
            audio_array = (np.frombuffer(audio_data, dtype=np.uint8).astype(np.float32) - 128.0) / 128.0
        else:
            # 24-bit (or exotic) PCM has no matching numpy dtype; fail loudly
            # instead of silently misinterpreting the buffer.
            raise ValueError(f"Unsupported sample width: {sample_width} bytes")

        # Keep only the first channel of multi-channel audio.
        if num_channels > 1:
            audio_array = audio_array.reshape(-1, num_channels)[:, 0]
            print(f"Converted multi-channel audio to mono, shape: {audio_array.shape}")

        # Guard: .min()/.max() raise on an empty array (zero-frame file).
        if audio_array.size:
            print(f"Audio data range: [{audio_array.min():.6f}, {audio_array.max():.6f}]")
        else:
            print("Warning: audio file contains no samples")
        return audio_array, np.array([len(audio_array)], dtype=np.int32)

    def send_audio_for_rescoring(self, audio_data: np.ndarray) -> str:
        """Send one utterance to the ``attention_rescoring`` model.

        Returns the transcript string, or ``""`` on empty input or any
        inference failure (errors are printed, not raised — best effort).
        """
        if len(audio_data) == 0:
            print("Warning: Empty audio data received")
            return ""

        # Triton expects a leading batch dimension on both inputs.
        wav_data = audio_data.astype(np.float32).reshape(1, -1)   # [1, audio_length]
        wav_lens = np.array([[len(audio_data)]], dtype=np.int32)  # [1, 1]

        print(f"Sending audio for rescoring - samples: {len(audio_data)}, "
              f"batch_size: 1, shapes - WAV: {wav_data.shape}, WAV_LENS: {wav_lens.shape}")

        inputs = [
            httpclient.InferInput("WAV", wav_data.shape, "FP32"),
            httpclient.InferInput("WAV_LENS", wav_lens.shape, "INT32")
        ]
        inputs[0].set_data_from_numpy(wav_data)
        inputs[1].set_data_from_numpy(wav_lens)

        outputs = [httpclient.InferRequestedOutput("TRANSCRIPTS")]

        try:
            start_time = time.time()
            response = self.client.infer(
                model_name="attention_rescoring",
                inputs=inputs,
                outputs=outputs
            )
            inference_time = time.time() - start_time

            result = response.as_numpy("TRANSCRIPTS")
            if result is not None and len(result) > 0:
                raw = result[0]
                # Triton BYTES tensors usually arrive as bytes, but be
                # tolerant of client versions that hand back str.
                transcript = raw.decode('utf-8') if isinstance(raw, bytes) else str(raw)
                print(f"Inference completed in {inference_time:.3f} seconds")
                return transcript
            print("No transcript result received")
            return ""

        except Exception as e:
            print(f"Error during inference: {e}")
            import traceback
            traceback.print_exc()
            return ""

    def process_audio_file(self, wav_path: str) -> str:
        """Recognize one whole audio file (non-streaming, single request)."""
        print(f"Processing audio file: {wav_path}")

        audio_data, total_length = self.preprocess_audio(wav_path)

        print(f"\nStarting attention rescoring inference...")
        print(f"Total audio samples: {len(audio_data)}")
        # Use the file's actual frame rate (set by preprocess_audio) rather
        # than a hard-coded 16 kHz.
        rate = self.sample_rate or self.DEFAULT_SAMPLE_RATE
        print(f"Audio duration: {len(audio_data) / rate:.2f} seconds")

        return self.send_audio_for_rescoring(audio_data)

    def batch_process_audio_files(self, wav_paths: List[str]) -> List[str]:
        """Recognize several files in sequence; returns one transcript per file.

        A failure on one file (e.g. missing path) yields an empty transcript
        for that entry instead of aborting the whole batch.
        """
        transcripts = []

        for i, wav_path in enumerate(wav_paths):
            print(f"\n{'='*60}")
            print(f"Processing file {i+1}/{len(wav_paths)}: {wav_path}")
            print(f"{'='*60}")

            try:
                transcript = self.process_audio_file(wav_path)
            except (FileNotFoundError, ValueError, wave.Error) as e:
                print(f"❌ Skipping file due to error: {e}")
                transcript = ""
            transcripts.append(transcript)

            if transcript:
                print(f"✅ Result: {transcript}")
            else:
                print("❌ No result")

        return transcripts

def test_server_connection(client: WenetTritonClient):
    """Verify the Triton server responds and every pipeline model is loaded.

    Returns True only when the server and all four models report ready.
    """
    print("Testing server connection...")

    try:
        if not client.client.is_server_ready():
            print("❌ Server is not ready!")
            return False
        print("✅ Server is ready")

        # Query readiness of every model in the ensemble pipeline.
        required = ("attention_rescoring", "feature_extractor", "encoder", "scoring")
        readiness = {name: client.client.is_model_ready(name) for name in required}

        for name, ready in readiness.items():
            if ready:
                print(f"✅ Model {name} is ready")
            else:
                print(f"❌ Model {name} is not ready")

        return all(readiness.values())

    except Exception as e:
        print(f"❌ Connection test failed: {e}")
        return False

def get_model_config(client: WenetTritonClient, model_name: str):
    """Fetch and pretty-print a model's configuration from the server.

    Shows platform, max batch size, input/output tensor specs, and — for
    ensemble models — the ordered list of pipeline steps. Errors are
    reported to stdout rather than raised.
    """
    try:
        config = client.client.get_model_config(model_name)
        print(f"\n{model_name} model config:")
        print(f"Platform: {config.get('platform', 'N/A')}")
        print(f"Max batch size: {config.get('max_batch_size', 'N/A')}")

        # Inputs and outputs share the same per-tensor print format.
        for section_key, heading in (('input', 'Inputs:'), ('output', 'Outputs:')):
            print(heading)
            for tensor in config.get(section_key, []):
                print(f"  - {tensor['name']}: {tensor['data_type']} {tensor['dims']}")

        if 'ensemble_scheduling' in config:
            print("This is an ensemble model")
            print("Ensemble steps:")
            for idx, step in enumerate(config['ensemble_scheduling']['step'], start=1):
                print(f"  {idx}. {step['model_name']}")

    except Exception as e:
        print(f"Error getting config for {model_name}: {e}")

def test_simple_inference(client: WenetTritonClient, wav_path: str):
    """Smoke-test the pipeline with only the first second of *wav_path*.

    Returns True when a non-empty transcript comes back.
    """
    print("\nTesting simple inference...")

    audio_data, _ = client.preprocess_audio(wav_path)

    # Truncate to one second (assuming 16 kHz) to keep the round trip fast.
    test_duration = 1.0
    test_samples = int(16000 * test_duration)
    snippet = audio_data[:test_samples]

    print(f"Using first {test_duration} second for quick test ({test_samples} samples)")

    transcript = client.send_audio_for_rescoring(snippet)

    if not transcript:
        print("❌ Quick test failed")
        return False

    print(f"✅ Quick test successful: {transcript}")
    return True

def main():
    """CLI entry point: parse arguments, verify the server, run recognition."""
    parser = argparse.ArgumentParser(description='Wenet Triton Client for Attention Rescoring')
    parser.add_argument('--url', type=str, default='localhost:8000',
                       help='Triton server URL (default: localhost:8000)')
    parser.add_argument('--wav-file', type=str, 
                       default='../../../audio.wav',
                       help='Path to WAV file for processing')
    parser.add_argument('--batch-files', type=str, nargs='+',
                       help='Multiple WAV files for batch processing')
    parser.add_argument('--test-only', action='store_true',
                       help='Only test server connection without processing audio')
    parser.add_argument('--show-config', action='store_true',
                       help='Show model configurations')
    parser.add_argument('--quick-test', action='store_true',
                       help='Quick test with first 1 second of audio')
    
    args = parser.parse_args()
    
    client = WenetTritonClient(url=args.url)
    
    try:
        # Bail out early if the server or any pipeline model isn't ready.
        if not test_server_connection(client):
            return
        
        if args.show_config:
            print("\n" + "="*60)
            print("MODEL CONFIGURATIONS")
            print("="*60)
            get_model_config(client, "attention_rescoring")
            return
        
        if args.quick_test:
            test_simple_inference(client, args.wav_file)
            return
        
        if args.test_only:
            print("\nConnection test completed successfully!")
            return
        
        print("\n" + "="*60)
        print("WENET ATTENTION RESCORING CLIENT")
        print("="*60)
        
        start_time = time.time()
        
        if args.batch_files:
            transcripts = client.batch_process_audio_files(args.batch_files)
            end_time = time.time()
            
            print("\n" + "="*60)
            print("BATCH PROCESSING RESULTS")
            print("="*60)
            for i, (wav_path, transcript) in enumerate(zip(args.batch_files, transcripts)):
                print(f"File {i+1}: {os.path.basename(wav_path)}")
                print(f"Result: {transcript}\n")
            # Previously the batch path never reported elapsed time.
            print(f"⏱️  Total processing time: {end_time - start_time:.2f} seconds")
                
        else:
            transcript = client.process_audio_file(args.wav_file)
            end_time = time.time()
            
            print("\n" + "="*60)
            print("FINAL RESULT")
            print("="*60)
            if transcript:
                print(f"🎯 Transcript: {transcript}")
            else:
                print("❌ No transcript received")
            
            print(f"\n⏱️  Total processing time: {end_time - start_time:.2f} seconds")
            # BUG FIX: the old line was labeled "Audio duration ... characters"
            # but printed a whitespace-token (word) count; report the actual
            # transcript length in characters.
            print(f"📊 Transcript length: {len(transcript) if transcript else 0} characters")
        
    except Exception as e:
        print(f"❌ Error: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()