from faster_whisper import WhisperModel
import pyaudio
import wave
import numpy as np
import threading
import queue
import time
import argparse
import sounddevice as sd
import soundfile as sf
from datetime import datetime
from abc import ABC, abstractmethod
from typing import List, Callable, Optional

class TranscriptionObserver(ABC):
    """Abstract observer notified whenever the transcription text changes."""

    @abstractmethod
    def on_transcription_update(self, text: str, is_final: bool = False):
        """Receive an updated transcription.

        Args:
            text: The current (cumulative) transcription text.
            is_final: True when this is the last update of the session.
        """

class ConsoleObserver(TranscriptionObserver):
    """Observer that writes transcription updates to the console.

    Intermediate updates are throttled to at most one write per
    `update_interval` seconds; final updates are always written.
    The line is rewritten in place via a leading carriage return.
    """

    def __init__(self, show_timestamp: bool = True):
        self.show_timestamp = show_timestamp  # prefix output with HH:MM:SS
        self.last_update_time = time.time()   # moment of the last console write
        self.update_interval = 0.5            # minimum seconds between writes

    def on_transcription_update(self, text: str, is_final: bool = False):
        now = time.time()
        if not is_final and now - self.last_update_time < self.update_interval:
            return  # throttled: too soon since the previous write
        prefix = f"{datetime.now().strftime('%H:%M:%S')} | " if self.show_timestamp else ""
        print(f"\r{prefix}{text}", end="", flush=True)
        self.last_update_time = now

class FileObserver(TranscriptionObserver):
    """Observer that accumulates updates and writes them to a file.

    Every update is buffered in memory; the whole buffer is written to
    `file_path` (overwriting previous contents) only when an update is
    marked final.
    """

    def __init__(self, file_path: str):
        self.file_path = file_path  # destination path for the transcript
        self.buffer = []            # all update texts received so far

    def on_transcription_update(self, text: str, is_final: bool = False):
        self.buffer.append(text)
        if not is_final:
            return
        with open(self.file_path, 'w', encoding='utf-8') as out:
            out.write('\n'.join(self.buffer))

class AudioSource(ABC):
    """Abstract base class for audio capture sources.

    NOTE(review): the consumer (AudioTranscriber.process_audio) also reads
    `rate` and `audio_queue` from concrete sources, so subclasses are
    expected to expose both — confirm when adding a new source.
    """

    @abstractmethod
    def start(self):
        """Begin capturing audio (implementations block until stopped)."""

    @abstractmethod
    def stop(self):
        """Stop capturing audio and release any resources."""

class MicrophoneSource(AudioSource):
    """Audio source that captures from the default microphone via PyAudio.

    float32 sample chunks are pushed onto `audio_queue` for a consumer
    (e.g. AudioTranscriber.process_audio) to read; `rate` is the sample
    rate of those chunks.
    """

    def __init__(self, chunk_size: int = 1024, channels: int = 1, rate: int = 16000):
        self.chunk_size = chunk_size      # frames per PyAudio buffer
        self.channels = channels          # number of input channels
        self.rate = rate                  # sample rate in Hz
        self.audio_queue = queue.Queue()  # captured float32 chunks
        self.is_running = False
        self.stream = None                # open PyAudio stream, None once closed
        self.p = None                     # PyAudio handle, None once terminated

    def audio_callback(self, in_data, frame_count, time_info, status):
        """PyAudio stream callback: enqueue the raw samples as float32."""
        if status:
            print(f"状态: {status}")
        audio_data = np.frombuffer(in_data, dtype=np.float32)
        self.audio_queue.put(audio_data)
        return (in_data, pyaudio.paContinue)

    def start(self):
        """Open the default input device and record until Ctrl+C.

        Blocks the calling thread; always releases the stream and the
        PyAudio handle through stop().
        """
        self.is_running = True
        self.p = pyaudio.PyAudio()

        # Show the available input devices to the user.
        print("可用的输入设备:")
        for i in range(self.p.get_device_count()):
            dev_info = self.p.get_device_info_by_index(i)
            if dev_info['maxInputChannels'] > 0:
                print(f"{i}: {dev_info['name']}")

        default_input = self.p.get_default_input_device_info()
        print(f"\n使用默认输入设备: {default_input['name']}")

        self.stream = self.p.open(
            format=pyaudio.paFloat32,
            channels=self.channels,
            rate=self.rate,
            input=True,
            frames_per_buffer=self.chunk_size,
            stream_callback=self.audio_callback
        )

        print("\n开始录音，按 Ctrl+C 停止...")
        print("=" * 50)

        try:
            self.stream.start_stream()
            while self.stream.is_active():
                time.sleep(0.1)
        except KeyboardInterrupt:
            print("\n停止录音...")
        finally:
            self.stop()

    def stop(self):
        """Stop recording and release resources (safe to call repeatedly).

        BUGFIX: handles are cleared after release so a second call —
        start()'s `finally` plus an external caller — no longer calls
        stop_stream()/close() on an already-closed stream or terminates
        PyAudio twice.
        """
        self.is_running = False
        if self.stream is not None:
            self.stream.stop_stream()
            self.stream.close()
            self.stream = None
        if self.p is not None:
            self.p.terminate()
            self.p = None

class SystemAudioSource(AudioSource):
    """Audio source that captures system audio via sounddevice.

    One-second float32 chunks (`rate` samples each) are pushed onto
    `audio_queue` for a consumer to read.
    """

    def __init__(self, chunk_size: int = 1024, channels: int = 1, rate: int = 16000):
        self.chunk_size = chunk_size      # frames per callback block
        self.channels = channels          # number of capture channels
        self.rate = rate                  # sample rate in Hz
        self.audio_queue = queue.Queue()  # 1-second float32 chunks
        self.is_running = False
        self.stream = None
        self.buffer = []                  # samples accumulated toward one second

    def audio_callback(self, indata, frames, time_info, status):
        """sounddevice callback: accumulate channel 0 and emit 1-second chunks.

        BUGFIX: parameter renamed from `time` to `time_info` so it no longer
        shadows the module-level `time` import (sounddevice invokes the
        callback positionally, so the rename is safe).
        """
        if status:
            print(f"状态: {status}")
        self.buffer.extend(indata[:, 0])

        # Emit a whole second of audio; keep the remainder for the next chunk.
        if len(self.buffer) >= self.rate:
            audio_data = np.array(self.buffer[:self.rate])
            self.buffer = self.buffer[self.rate:]
            self.audio_queue.put(audio_data)

    def start(self):
        """Open an input stream and capture until stop() or Ctrl+C."""
        self.is_running = True
        print("开始捕获系统音频...")
        print("支持的设备:")
        devices = sd.query_devices()
        for i, device in enumerate(devices):
            print(f"{i}: {device['name']}")

        default_output = sd.query_devices(kind='output')
        print(f"\n使用默认输出设备: {default_output['name']}")

        try:
            # BUGFIX: pass blocksize so the configured chunk_size is actually
            # honored (it was previously stored but never used).
            with sd.InputStream(callback=self.audio_callback,
                                channels=self.channels,
                                samplerate=self.rate,
                                blocksize=self.chunk_size,
                                dtype=np.float32):
                print("\n正在监听系统音频，按 Ctrl+C 停止...")
                print("=" * 50)
                while self.is_running:
                    time.sleep(0.1)
        except KeyboardInterrupt:
            print("\n停止捕获系统音频...")
        finally:
            self.stop()

    def stop(self):
        """Signal the capture loop in start() to exit."""
        self.is_running = False

class AudioTranscriber:
    """Transcribes queued audio chunks with faster-whisper and notifies observers.

    Registered TranscriptionObserver instances receive the cumulative
    transcript (joined `text_buffer`) after each recognized segment.
    """

    def __init__(self, model_size: str = "large-v3-turbo", device: str = "cpu", compute_type: str = "int8"):
        self.model = WhisperModel(model_size_or_path=model_size, device=device, compute_type=compute_type)
        self.observers: List[TranscriptionObserver] = []  # notified on every update
        self.text_buffer = []        # rolling list of recent segment texts (max 10)
        self.processing_thread = None  # worker transcribing the latest chunk
        self.is_running = False      # set False to stop process_audio()

    def add_observer(self, observer: TranscriptionObserver):
        """Register an observer for transcription updates."""
        self.observers.append(observer)

    def remove_observer(self, observer: TranscriptionObserver):
        """Unregister a previously added observer (ValueError if absent)."""
        self.observers.remove(observer)

    def notify_observers(self, text: str, is_final: bool = False):
        """Push the current transcript to every registered observer."""
        for observer in self.observers:
            observer.on_transcription_update(text, is_final)

    def process_audio(self, audio_source: AudioSource):
        """Consume audio from `audio_source.audio_queue` and transcribe it.

        Collects about one second of samples (`audio_source.rate`) at a time
        and hands each chunk to a background worker.  Runs until
        `self.is_running` is set to False.
        """
        self.is_running = True
        buffer = []

        while self.is_running:
            try:
                # Collect one second of audio.  BUGFIX: a blocking get with a
                # timeout replaces the original empty()/get() busy-wait, which
                # burned CPU and was race-prone; checking is_running here also
                # makes shutdown possible (the old inner loop never checked it).
                while len(buffer) < audio_source.rate and self.is_running:
                    try:
                        buffer.extend(audio_source.audio_queue.get(timeout=0.1))
                    except queue.Empty:
                        continue
                if not self.is_running:
                    break

                audio_data = np.array(buffer)

                # Only one transcription worker at a time: wait for the
                # previous chunk to finish before starting the next.
                if self.processing_thread and self.processing_thread.is_alive():
                    self.processing_thread.join()

                self.processing_thread = threading.Thread(
                    target=self.process_audio_data,
                    args=(audio_data,)
                )
                self.processing_thread.start()

                buffer = []

            except Exception as e:
                print(f"\n处理错误: {str(e)}")
                buffer = []  # drop the partial chunk so one error cannot poison the next
                continue

    def process_audio_data(self, audio_data: np.ndarray):
        """Transcribe one chunk and notify observers with the updated text."""
        try:
            segments, info = self.model.transcribe(
                audio_data,
                beam_size=5,
                vad_filter=True,  # skip silent stretches
                vad_parameters=dict(min_silence_duration_ms=500),
                condition_on_previous_text=True
            )

            for segment in segments:
                if not self.text_buffer:
                    self.text_buffer.append(segment.text)
                else:
                    self.update_text_buffer(segment.text)

                self.notify_observers(' '.join(self.text_buffer))

        except Exception as e:
            print(f"\n处理错误: {str(e)}")

    def update_text_buffer(self, new_text: str):
        """Merge `new_text` into the rolling buffer, de-duplicating near-repeats.

        A segment more than 70% similar to the previous one is treated as a
        refinement (the longer version is kept); otherwise it is appended.
        The buffer is capped at the 10 most recent segments.
        """
        if not self.text_buffer:
            self.text_buffer.append(new_text)
            return

        last_text = self.text_buffer[-1]
        if self.calculate_similarity(last_text, new_text) > 0.7:
            # Near-duplicate: keep whichever version carries more text.
            if len(new_text) > len(last_text):
                self.text_buffer[-1] = new_text
        else:
            self.text_buffer.append(new_text)
            if len(self.text_buffer) > 10:
                self.text_buffer = self.text_buffer[-10:]

    @staticmethod
    def calculate_similarity(text1: str, text2: str) -> float:
        """Return the Jaccard similarity of the two texts' word sets (0 if both empty)."""
        words1 = set(text1.split())
        words2 = set(text2.split())
        union = words1 | words2
        return len(words1 & words2) / len(union) if union else 0

def main():
    """CLI entry point: parse arguments and run the selected recognition mode."""
    parser = argparse.ArgumentParser(description='语音识别工具')
    parser.add_argument('--mode', choices=['stream', 'file', 'system'], default='stream',
                      help='识别模式：stream(麦克风实时识别) 或 file(文件识别) 或 system(系统音频识别)')
    parser.add_argument('--input', type=str,
                      help='输入文件路径（仅在file模式下使用）')
    parser.add_argument('--output', type=str,
                      help='输出文件路径（可选）')
    parser.add_argument('--model', type=str, default='large-v3-turbo',
                      help='模型大小：small, medium, large-v3, large-v3-turbo')

    args = parser.parse_args()

    # Build the transcriber and attach output observers.
    transcriber = AudioTranscriber(model_size=args.model)
    transcriber.add_observer(ConsoleObserver())
    if args.output:
        transcriber.add_observer(FileObserver(args.output))

    if args.mode in ('stream', 'system'):
        audio_source = MicrophoneSource() if args.mode == 'stream' else SystemAudioSource()
        # BUGFIX: process_audio() loops forever, so it must run in a
        # background thread.  The original called it before start(), which
        # meant the audio source was never started and no audio was ever
        # captured.
        worker = threading.Thread(
            target=transcriber.process_audio,
            args=(audio_source,),
            daemon=True
        )
        worker.start()
        try:
            audio_source.start()  # blocks until Ctrl+C / stop()
        finally:
            transcriber.is_running = False  # let the worker loop exit
    elif args.mode == 'file':
        if not args.input:
            print("错误：文件模式下必须指定输入文件路径")
            return
        segments, info = transcriber.model.transcribe(args.input)
        for segment in segments:
            print(f"[{segment.start:.1f}s → {segment.end:.1f}s] {segment.text}")

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
