import os
import time
import keyboard
import numpy as np
import sounddevice as sd

from opencc import OpenCC
from typing import Any, Dict
from faster_whisper import WhisperModel

# Allow duplicate OpenMP runtimes to coexist instead of aborting — a common
# crash when multiple native wheels (e.g. ctranslate2 via faster-whisper, and
# numpy) each bundle their own libiomp/libgomp. Must be set before those
# libraries initialize.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Pin OpenMP to one thread — presumably to avoid thread oversubscription when
# the Whisper model runs; TODO confirm this is still wanted on CPU inference.
os.environ["OMP_NUM_THREADS"] = "1"


class VoiceManager:
    """Push-to-talk voice input.

    Usage: triple-tap Ctrl to arm, then hold Ctrl while speaking; audio is
    recorded from the microphone until Ctrl is released and transcribed with
    faster-whisper. Transcripts matching known Whisper hallucination phrases
    are discarded.
    """

    def __init__(self, cfg: Dict[str, Any]):
        """Configure audio capture and load the Whisper model.

        cfg keys (all have defaults except ``voice_model_path``):
            voice_sample_rate:  capture sample rate in Hz (default 16000).
            voice_channels:     number of input channels (default 1).
            voice_cuda_device:  device string for WhisperModel (default 'cuda').
            voice_model_path:   path or name of the faster-whisper model.
            voice_keywords:     phrases marking a transcript as noise.
        """
        self.sample_rate: int = cfg.get('voice_sample_rate', 16000)
        self.channels: int = cfg.get('voice_channels', 1)
        self.device: str = cfg.get('voice_cuda_device', 'cuda')
        self.model_path = cfg.get('voice_model_path')

        # Phrases Whisper commonly hallucinates on silence/background noise
        # (subtitle credits, "thanks for watching" boilerplate, etc.). A
        # transcript containing any of them is treated as noise and dropped.
        default_keywords = [
            "字幕", "amara", "社群提供", "感谢观看", "谢谢观看", "订阅", "点赞",
            "铃铛", "掌声", "优优独播剧场", "exclusive", "ming pao", "请不吝",
            "李宗盛", "喜欢我的影片", "明镜与点点栏目", "多谢您的观看"
        ]

        # One traditional->simplified converter shared by keyword
        # normalization and transcript post-processing (was built twice).
        self.cc = OpenCC('t2s')
        self.noisy_keywords = [
            self.cc.convert(w).lower()
            for w in cfg.get('voice_keywords', default_keywords)
        ]

        self.model = WhisperModel(self.model_path, device=self.device)

    def _wait_for_ctrl_triple_press(self, interval: float = 0.3) -> None:
        """Block until Ctrl is tapped three times within `interval` seconds
        of each other, then until Ctrl is (still) held down.

        The final wait lets the third tap flow directly into the hold that
        starts recording.
        """
        print('连按3次ctrl说话,语音中连按3次ctrl中断...')
        presses = 0
        last_time = None
        while True:
            keyboard.wait("ctrl")
            now = time.time()
            # Count the press only if it came soon enough after the previous
            # one; otherwise restart the sequence at 1.
            if last_time is None or now - last_time <= interval:
                presses += 1
            else:
                presses = 1
            last_time = now
            if presses == 3:
                break
            # Debounce: wait for the key to be released so one long press
            # is not counted repeatedly.
            while keyboard.is_pressed("ctrl"):
                time.sleep(0.05)
        # Wait for the hold phase (usually the third press is still down).
        while not keyboard.is_pressed("ctrl"):
            time.sleep(0.05)

    def _record_while_ctrl_pressed(self):
        """Capture microphone audio for as long as Ctrl is held.

        Returns:
            A 1-D float32 waveform in [-1, 1] — the format
            ``WhisperModel.transcribe`` expects for a raw ndarray —
            or None if no audio was captured.
        """
        chunks = []
        stream = sd.InputStream(samplerate=self.sample_rate,
                                channels=self.channels, dtype='float32')
        with stream:  # the context manager starts/stops the stream
            print("开始识别")
            while keyboard.is_pressed("ctrl"):
                data, _overflowed = stream.read(1024)
                chunks.append(data.flatten())
        if not chunks:
            return None
        # BUG FIX: the waveform was previously scaled to int16 before being
        # passed to transcribe(); faster-whisper interprets a raw ndarray as
        # float32 samples in [-1, 1], so int16 values were massively
        # out-of-range amplitudes. Keep the float32 waveform as-is.
        return np.concatenate(chunks)

    def listen(self) -> str:
        """One full push-to-talk interaction.

        Waits for the triple-Ctrl trigger, records while Ctrl is held,
        transcribes as Chinese, converts to simplified characters, and
        returns the stripped transcript — or '' when nothing was recorded
        or the transcript matched a noise keyword.
        """
        self._wait_for_ctrl_triple_press()
        audio = self._record_while_ctrl_pressed()
        if audio is None or len(audio) == 0:
            return ''
        segments, _info = self.model.transcribe(audio, language="zh")
        result = ''.join(segment.text for segment in segments)
        result = self.cc.convert(result)
        result_lc = result.strip().lower()
        # Drop transcripts that look like Whisper hallucinations.
        if any(keyword in result_lc for keyword in self.noisy_keywords):
            return ''
        return result.strip()
