import os
import re
from pathlib import Path
from typing import Optional

from .config import Config
from .logger import log_event


def _record_with_pyaudio(output_wav: Path, duration: float, sample_rate: int, channels: int) -> None:
    """Record `duration` seconds of 16-bit PCM from the default mic via PyAudio.

    Args:
        output_wav: Destination WAV file path.
        duration: Recording length in seconds.
        sample_rate: Sample rate in Hz.
        channels: Number of input channels.

    Raises:
        Any PyAudio/OS error (e.g. no input device) propagates; callers rely
        on this to fall back to the sounddevice implementation.
    """
    import wave
    import pyaudio

    fmt = pyaudio.paInt16
    chunk = 1024

    pa = pyaudio.PyAudio()
    try:
        stream = pa.open(
            format=fmt,
            channels=channels,
            rate=sample_rate,
            input=True,
            frames_per_buffer=chunk,
        )
        try:
            frames = []
            # Number of whole chunks needed to cover the requested duration.
            total_chunks = int(sample_rate / chunk * duration)
            for _ in range(total_chunks):
                frames.append(stream.read(chunk))
        finally:
            # Always release the stream, even if a read fails mid-recording.
            stream.stop_stream()
            stream.close()
        # Query the sample width while the PyAudio instance is still alive.
        sample_width = pa.get_sample_size(fmt)
    finally:
        pa.terminate()

    with wave.open(str(output_wav), "wb") as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sample_width)
        wf.setframerate(sample_rate)
        wf.writeframes(b"".join(frames))


def _record_with_sounddevice(output_wav: Path, duration: float, sample_rate: int, channels: int) -> None:
    """Blocking recording of `duration` seconds of 16-bit PCM via sounddevice.

    Args:
        output_wav: Destination WAV file path.
        duration: Recording length in seconds.
        sample_rate: Sample rate in Hz.
        channels: Number of input channels.
    """
    import sounddevice as sd
    import soundfile as sf

    # sd.rec returns once recording starts; sd.wait blocks until it finishes.
    recording = sd.rec(int(duration * sample_rate), samplerate=sample_rate, channels=channels, dtype="int16")
    sd.wait()
    sf.write(str(output_wav), recording, sample_rate)


# Stoppable recording using PyAudio or sounddevice streams.
def record_audio_stoppable(output_wav: Path, stop_event, max_duration: float = 5.0) -> Path:
    """Record audio and allow external cancellation via stop_event.

    Tries a PyAudio stream first; falls back to a sounddevice InputStream,
    and finally to a blocking sounddevice record. Recording stops as soon as
    `stop_event` is set, or when `max_duration` elapses.

    Args:
        output_wav: Destination WAV file path (parent directories are created).
        stop_event: threading.Event-like object; `set()` cancels recording.
        max_duration: Upper bound on recording length in seconds.

    Returns:
        The (Path-coerced) output_wav path.
    """
    output_wav = Path(output_wav)
    sr = Config.MIC_SAMPLE_RATE
    ch = Config.MIC_CHANNELS
    output_wav.parent.mkdir(parents=True, exist_ok=True)
    log_event("RECORD_START", f"path={output_wav}, sr={sr}, ch={ch}")

    # Try PyAudio streaming
    try:
        import wave
        import pyaudio
        import time as _time

        fmt = pyaudio.paInt16
        chunk = 1024
        pa = pyaudio.PyAudio()
        try:
            stream = pa.open(
                format=fmt,
                channels=ch,
                rate=sr,
                input=True,
                frames_per_buffer=chunk,
            )
            try:
                frames = []
                start = _time.time()
                while not stop_event.is_set() and (_time.time() - start) < max_duration:
                    frames.append(stream.read(chunk))
            finally:
                # Release the stream even if a read fails mid-recording.
                stream.stop_stream()
                stream.close()
            # Query the sample width while the PyAudio instance is still alive.
            sample_width = pa.get_sample_size(fmt)
        finally:
            pa.terminate()
        with wave.open(str(output_wav), "wb") as wf:
            wf.setnchannels(ch)
            wf.setsampwidth(sample_width)
            wf.setframerate(sr)
            wf.writeframes(b"".join(frames))
        cancelled = stop_event.is_set()
        log_event("RECORD_END", f"path={output_wav}, method=pyaudio, reason={'cancelled' if cancelled else 'time_elapsed'}, frames={len(frames)}")
        return output_wav
    except Exception as e:
        # Don't swallow the failure silently — record why we are falling back.
        log_event("RECORD_PYAUDIO_FAIL", f"exception={type(e).__name__}: {e}")

    # Fallback: sounddevice InputStream with callback
    try:
        import sounddevice as sd
        import soundfile as sf
        import time as _time

        sf_file = sf.SoundFile(str(output_wav), mode='w', samplerate=sr, channels=ch, subtype='PCM_16')
        try:
            def callback(indata, frames, time, status):
                if stop_event.is_set():
                    # Stops the stream; the polling loop below exits promptly.
                    raise sd.CallbackStop
                sf_file.write(indata.copy())

            with sd.InputStream(samplerate=sr, channels=ch, dtype='int16', callback=callback):
                # Poll in short intervals so cancellation returns promptly
                # instead of always sleeping the full max_duration.
                deadline = _time.time() + max_duration
                while not stop_event.is_set() and _time.time() < deadline:
                    sd.sleep(50)
        finally:
            # Close the file even if the stream setup/teardown raises.
            sf_file.close()
        log_event("RECORD_END", f"path={output_wav}, method=sounddevice_stream, reason={'cancelled' if stop_event.is_set() else 'time_elapsed'}")
        return output_wav
    except Exception:
        # As last resort, do blocking record
        log_event("RECORD_FALLBACK_BLOCKING", f"path={output_wav}, duration={max_duration}")
        _record_with_sounddevice(output_wav, max_duration, sr, ch)
        log_event("RECORD_END", f"path={output_wav}, method=sounddevice_blocking")
        return output_wav


def record_audio(output_wav: Path, duration: float = 5.0) -> Path:
    """Record audio from microphone into a WAV file.

    Tries PyAudio first, falls back to sounddevice if unavailable.

    Args:
        output_wav: Destination WAV file path (parent directories are created).
        duration: Recording length in seconds.

    Returns:
        The (Path-coerced) output_wav path.
    """
    sr = Config.MIC_SAMPLE_RATE
    ch = Config.MIC_CHANNELS

    output_wav = Path(output_wav)
    output_wav.parent.mkdir(parents=True, exist_ok=True)
    log_event("RECORD_START_LEGACY", f"path={output_wav}, sr={sr}, ch={ch}, duration={duration}")

    try:
        _record_with_pyaudio(output_wav, duration, sr, ch)
    except Exception as e:
        # Log why PyAudio failed instead of silently swallowing it,
        # then fall back to the sounddevice implementation.
        log_event("RECORD_PYAUDIO_FAIL", f"exception={type(e).__name__}: {e}")
        _record_with_sounddevice(output_wav, duration, sr, ch)

    log_event("RECORD_END_LEGACY", f"path={output_wav}")
    return output_wav


def _build_initial_prompt() -> str:
    """Build the ASR initial prompt from the configured seed plus hotwords.

    Combines the configured seed sentence with hotwords and a small set of
    base action terms (deduplicated, order-preserving) to bias decoding
    toward proper nouns and common commands.
    """
    seed = getattr(Config, "ASR_INITIAL_PROMPT_SEED", "以下为中文语音助手指令：")
    # Base action terms; kept short to avoid prompt bloat.
    base_terms = ["打开", "关闭", "搜索", "点击第一条链接", "播放", "写文章"]
    hotwords = list(getattr(Config, "ASR_HOTWORDS", []))
    # dict.fromkeys dedupes while preserving first-seen order (hotwords win).
    unique_terms = list(dict.fromkeys(hotwords + base_terms))
    # Space-separate terms; skip the extra space for terms already ending in one.
    pieces = [term if term.endswith(" ") else f" {term}" for term in unique_terms]
    return (seed + "".join(pieces)).strip()


def transcribe_with_whisper(audio_path: Path, language: str = "zh") -> str:
    """Transcribe an audio file with OpenAI Whisper.

    First attempts conservative decoding with beam search; if the installed
    Whisper version rejects those parameters, retries with a minimal
    parameter set.

    Args:
        audio_path: Path to the audio file to transcribe.
        language: Language hint; falls back to Config.ASR_LANGUAGE, then "zh".

    Returns:
        The stripped transcript text, or "" if transcription fails.
    """
    import whisper

    model_name = Config.WHISPER_MODEL
    lang = language or getattr(Config, "ASR_LANGUAGE", "zh")
    init_prompt = _build_initial_prompt()
    log_event("ASR_START", f"model={model_name}, audio={audio_path}")
    try:
        model = whisper.load_model(model_name)
        # Preferred: conservative decoding (temperature=0) with beam search.
        try:
            result = model.transcribe(
                str(audio_path),
                language=lang,
                temperature=0.0,
                beam_size=5,
                best_of=5,
                patience=1,
                condition_on_previous_text=False,
                initial_prompt=init_prompt,
            )
        except (AssertionError, TypeError):
            # Some Whisper versions reject the beam-search kwargs with a
            # TypeError (unexpected keyword) rather than an AssertionError;
            # catch both and retry with only temperature and prompt so a
            # version mismatch degrades gracefully instead of returning "".
            result = model.transcribe(
                str(audio_path),
                language=lang,
                temperature=0.0,
                condition_on_previous_text=False,
                initial_prompt=init_prompt,
            )
        text = result.get("text", "").strip()
    except Exception as e:
        log_event("ASR_ERROR", f"exception={type(e).__name__}: {e}")
        text = ""
    log_event("ASR_RESULT", f"len={len(text)}, text={text[:60]}")
    return text


# Filler tokens stripped verbatim from transcripts by preprocess_text().
# NOTE(review): removal is plain substring replacement, so single-character
# entries ("吧", "啊", "呢", ...) are also removed when they occur inside
# legitimate words — confirm this aggressiveness is intended. "轻音" looks
# like a recurring ASR artifact rather than a spoken filler — verify.
_FILLERS = [
    "嗯",
    "那个",
    "就是",
    "然后",
    "吧",
    "啊",
    "呀",
    "呢",
    "喂",
    "哈",
    "嘿",
    "嘛",
    "轻音",
]


def preprocess_text(text: str) -> str:
    """Remove filler words, normalize common misrecognitions, and tidy whitespace.

    Args:
        text: Raw ASR transcript.

    Returns:
        Cleaned text with fillers stripped, known misrecognitions replaced,
        Config-level corrections applied, and whitespace runs collapsed.
    """
    cleaned = text
    # Remove common fillers (plain substring removal).
    for filler in _FILLERS:
        cleaned = cleaned.replace(filler, "")
    # Normalize common misrecognitions (homophone variants of 记事本).
    normalize_map = {
        "即时本": "记事本",
        "祭祀本": "记事本",
        "计师本": "记事本",
        "纪事本": "记事本",
    }
    # str.replace is a no-op when the key is absent, so no membership check
    # is needed before each replacement.
    for wrong, right in normalize_map.items():
        cleaned = cleaned.replace(wrong, right)
    # Apply the global correction map (high-value proper nouns, etc.).
    # Best-effort: a malformed Config mapping must not break preprocessing.
    try:
        corrections = getattr(Config, "ASR_CORRECTIONS", {})
        for wrong, right in corrections.items():
            cleaned = cleaned.replace(wrong, right)
    except Exception:
        pass
    # Collapse whitespace runs to single spaces and trim the ends.
    cleaned = re.sub(r"\s+", " ", cleaned).strip()
    log_event("ASR_CLEAN", f"before_len={len(text)}, after_len={len(cleaned)}")
    if cleaned != text:
        log_event("ASR_NORMALIZE", f"text={text[:60]} -> cleaned={cleaned[:60]}")
    return cleaned