from faster_whisper import WhisperModel
import threading
import time
from typing import List
from whisper.observers.base import TranscriptionObserver
from whisper.sources.base import AudioSource
import numpy as np


class AudioTranscriber:
    """Streaming audio transcriber built on faster-whisper.

    Pulls raw samples from an ``AudioSource``, batches them into roughly
    one-second chunks, transcribes each chunk on a single background
    worker thread, and pushes the accumulated transcript text to every
    registered ``TranscriptionObserver``.
    """

    def __init__(self, model_size: str = "large-v3-turbo", device: str = "cpu", compute_type: str = "int8"):
        """Load the Whisper model and initialise observer/worker state.

        Args:
            model_size: faster-whisper model name or local path.
            device: inference device, e.g. "cpu" or "cuda".
            compute_type: CTranslate2 precision/quantisation mode.
        """
        self.model = WhisperModel(model_size_or_path=model_size, device=device, compute_type=compute_type)
        self.observers: List[TranscriptionObserver] = []
        # Rolling window of recent transcript segments (capped at 10).
        self.text_buffer = []
        # Single background worker running the actual transcription.
        self.processing_thread = None
        self.is_running = False

    def add_observer(self, observer: TranscriptionObserver):
        """Register an observer to receive transcription updates."""
        self.observers.append(observer)

    def remove_observer(self, observer: TranscriptionObserver):
        """Unregister a previously added observer.

        Raises:
            ValueError: if the observer was never registered.
        """
        self.observers.remove(observer)

    def notify_observers(self, text: str, is_final: bool = False):
        """Push the current transcript text to every registered observer."""
        for observer in self.observers:
            observer.on_transcription_update(text, is_final)

    def process_audio(self, audio_source: AudioSource):
        """Consume loop: batch ~1 s of samples, hand each batch to the worker.

        Runs until ``stop()`` flips ``is_running``; blocks the calling
        thread. Transcription is serialised — a new chunk waits for the
        previous worker thread to finish before starting.
        """
        self.is_running = True
        buffer = []
        while self.is_running:
            try:
                # Accumulate roughly one second of samples (rate samples).
                # Re-check is_running so stop() cannot hang this inner loop
                # while the source is silent.
                while self.is_running and len(buffer) < audio_source.rate:
                    if not audio_source.audio_queue.empty():
                        buffer.extend(audio_source.audio_queue.get())
                    else:
                        time.sleep(0.1)
                if not self.is_running:
                    break
                # NOTE(review): dtype is whatever the source queues;
                # faster-whisper expects float32 PCM — confirm upstream.
                audio_data = np.array(buffer)
                # Serialise: wait for the previous chunk to finish first.
                if self.processing_thread and self.processing_thread.is_alive():
                    self.processing_thread.join()
                self.processing_thread = threading.Thread(
                    target=self.process_audio_data,
                    args=(audio_data,)
                )
                self.processing_thread.start()
                buffer = []
            except Exception as e:
                print(f"\n处理错误: {str(e)}")
                continue

    def stop(self):
        """Stop the consume loop and wait for the in-flight worker to finish.

        Without this, ``is_running`` was never cleared and the last worker
        thread was never joined.
        """
        self.is_running = False
        if self.processing_thread and self.processing_thread.is_alive():
            self.processing_thread.join()

    def process_audio_data(self, audio_data: np.ndarray):
        """Transcribe one audio chunk and publish the updated transcript.

        Runs on the background worker thread; errors are reported to
        stdout rather than raised, so one bad chunk cannot kill the loop.
        """
        try:
            segments, _ = self.model.transcribe(
                audio_data,
                beam_size=5,
                vad_filter=True,
                vad_parameters=dict(min_silence_duration_ms=500),
                condition_on_previous_text=True
            )
            for segment in segments:
                # update_text_buffer already handles the empty-buffer case,
                # so no separate first-segment branch is needed.
                self.update_text_buffer(segment.text)
                self.notify_observers(' '.join(self.text_buffer))
        except Exception as e:
            print(f"\n处理错误: {str(e)}")

    def update_text_buffer(self, new_text: str):
        """Merge ``new_text`` into the rolling transcript buffer.

        A segment highly similar (> 0.7 Jaccard) to the last entry is
        treated as a re-decode of the same speech: keep the longer
        variant. Otherwise append and trim the buffer to 10 entries.
        """
        if self.text_buffer and self.calculate_similarity(self.text_buffer[-1], new_text) > 0.7:
            # Near-duplicate of the last segment: keep the longer one.
            if len(new_text) > len(self.text_buffer[-1]):
                self.text_buffer[-1] = new_text
        else:
            self.text_buffer.append(new_text)
            # Keep only the most recent 10 segments.
            if len(self.text_buffer) > 10:
                self.text_buffer = self.text_buffer[-10:]

    @staticmethod
    def calculate_similarity(text1: str, text2: str) -> float:
        """Return the Jaccard similarity of the two strings' word sets.

        Two empty strings yield 0.0 (dissimilar), matching the original
        behaviour; the literal is now a float to honour the annotation.
        """
        words1 = set(text1.split())
        words2 = set(text2.split())
        union = words1 | words2
        return len(words1 & words2) / len(union) if union else 0.0
