import torch
from transformers import AutoProcessor, pipeline
from ipex_llm.transformers import AutoModelForSpeechSeq2Seq

class ASRService:
    """Whisper-based automatic-speech-recognition service accelerated via ipex-llm.

    Loads the model 4-bit quantized onto an Intel XPU when one is available,
    falling back to CPU otherwise.  May be used as a context manager
    (``with ASRService() as asr: ...``) so model resources are released
    deterministically.
    """

    def __init__(self, model_id=r"C:\models\openai\whisper-large-v3-turbo",
                 batch_size=16,
                 max_workers=4):
        """Load the model and build the ASR pipeline.

        Args:
            model_id: Local path or hub id of the Whisper checkpoint.
            batch_size: Batch size forwarded to the HF pipeline.
            max_workers: Kept for backward compatibility; batching is done by
                the pipeline itself (``batch_size``), not by a thread pool.
        """
        self.model_id = model_id
        self.batch_size = batch_size
        self.max_workers = max_workers
        self.device = "xpu" if torch.xpu.is_available() else "cpu"
        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
            self.model_id, load_in_4bit=True, optimize_model=False, use_cache=True
        ).to(self.device)
        # Clear forced decoder ids so language/task are inferred per input
        # instead of being pinned by the checkpoint's generation config.
        self.model.config.forced_decoder_ids = None
        self.processor = AutoProcessor.from_pretrained(self.model_id)
        self.pipeline = pipeline(
            "automatic-speech-recognition",
            model=self.model,
            tokenizer=self.processor.tokenizer,
            feature_extractor=self.processor.feature_extractor,
            max_new_tokens=128,
            chunk_length_s=30,  # long-form audio is chunked into 30 s windows
            batch_size=self.batch_size,
            return_timestamps=True,
            device=self.device,
        )

    def __enter__(self):
        """Support ``with ASRService() as asr:`` usage."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Release model resources on scope exit; never suppresses exceptions."""
        self.release()
        return False

    def transcribe_single(self, audio_file):
        """Transcribe one audio file; returns the pipeline's result."""
        return self.pipeline(audio_file)

    def transcribe_batch(self, audio_files):
        """Transcribe a list of audio files in one batched pipeline call.

        The HF ASR pipeline accepts a list of inputs and batches them
        internally using the ``batch_size`` configured in ``__init__``.
        """
        return self.pipeline(audio_files)

    def release(self):
        """Drop model/processor/pipeline references and free device memory.

        ``torch.xpu.empty_cache()`` is only called when the service actually
        runs on an XPU; the original unconditional call would fail on
        CPU-only hosts where ``__init__`` fell back to ``"cpu"``.
        """
        self.model = None
        self.processor = None
        self.pipeline = None
        if self.device == "xpu":
            torch.xpu.empty_cache()

# Usage example
# with ASRService() as asr_service:
#     result = asr_service.transcribe_single("path/to/audio_file.wav")
#     batch_results = asr_service.transcribe_batch(["file1.wav", "file2.wav", "file3.wav"])
