import os
import time
import threading
import queue
import sounddevice as sd
import soundfile as sf
import numpy as np
import whisper
import argparse
from ds_module import DeepSeekAPI

# Parse command-line arguments.
parser = argparse.ArgumentParser(description='语音转录工具')
parser.add_argument('--device', type=int, default=None, help='音频设备ID')
parser.add_argument('--channels', type=int, default=2, help='音频通道数，默认2')
parser.add_argument('--chunk', type=int, default=1024, help='每个缓冲区的帧数')
parser.add_argument('--duration', type=int, default=5, help='每次录音时长（秒）')
# NOTE: 22050 (not 22100) is the standard intermediate sample rate.
parser.add_argument('--rate', type=int, default=48000, help='采样率，默认48000，可能值16000,22050')
parser.add_argument('--output', type=str, default='recordings', help='输出目录')
# Valid Whisper model names are "large", "large-v2", "large-v3", ... —
# the previous help text ("larg", ...) would mislead users into a load error.
parser.add_argument('--model', type=str, default='turbo', help='Whisper模型名称，默认turbo，可能值large,large-v2,large-v3,medium,small,base,tiny')
args = parser.parse_args()

# Bind parsed arguments to module-level constants used below.
CHANNELS = args.channels
CHUNK = args.chunk
RECORD_SECONDS = args.duration
RATE = args.rate
OUTPUT_DIR = args.output
MODEL_NAME = args.model

def find_device_id():
    """Return the index of the first audio device whose name contains
    'Audio Stereo input'.

    Raises:
        ValueError: if no such device is present.
    """
    for index, info in enumerate(sd.query_devices()):
        if "Audio Stereo input" in info['name']:
            return index
    raise ValueError("未找到包含 'Audio Stereo input' 的设备")

# Use the device given on the CLI if any; otherwise auto-detect it by name.
DEVICE_ID = args.device if args.device is not None else find_device_id()

def resample_audio(audio, orig_rate, orig_channel, target_channel=1, target_rate=16000):
    """Convert audio to a target sample rate and channel count.

    Channel conversion: multi-channel -> mono averages the channels;
    mono -> multi-channel duplicates the single channel.  Rate conversion
    uses per-channel linear interpolation (np.interp).

    Args:
        audio (np.ndarray): input samples, shape (samples,) or
            (samples, channels), dtype float32.
        orig_rate (int): input sample rate in Hz.
        orig_channel (int): input channel count.
        target_channel (int): output channel count (default 1 = mono).
        target_rate (int): output sample rate in Hz (default 16000).

    Returns:
        np.ndarray: resampled audio, shape (new_samples, target_channel),
        dtype float32.
    """
    # Normalize to a 2-D (samples, channels) array.
    if audio.ndim == 1:
        audio = audio.reshape(-1, 1)

    # Guard against empty input: np.interp raises ValueError on an empty
    # sample-point array, so return an empty result of the right shape.
    if audio.shape[0] == 0:
        return np.zeros((0, target_channel), dtype=np.float32)

    # Channel-count conversion.
    if target_channel == 1 and orig_channel > 1:
        audio = np.mean(audio, axis=1, keepdims=True)  # downmix by averaging
    elif target_channel > 1 and orig_channel == 1:
        audio = np.tile(audio, (1, target_channel))    # duplicate the mono channel

    # Time axes of the original and resampled signals.
    orig_length = audio.shape[0]
    target_length = int(orig_length * target_rate / orig_rate)
    orig_times = np.arange(orig_length) / orig_rate
    target_times = np.arange(target_length) / target_rate

    # Linear interpolation per output channel; the modulo keeps column
    # indexing valid when channel counts were not converted above.
    resampled_audio = np.zeros((target_length, target_channel), dtype=np.float32)
    for channel in range(target_channel):
        resampled_audio[:, channel] = np.interp(target_times, orig_times, audio[:, channel % orig_channel])

    return resampled_audio

# Create the directory for recordings.  exist_ok avoids the TOCTOU race of
# the previous exists()-then-makedirs() pattern.
os.makedirs(OUTPUT_DIR, exist_ok=True)

print(f"当前参数配置: device={args.device}, channels={args.channels}, chunk={args.chunk}, duration={args.duration}, rate={args.rate}, output={args.output}, model={args.model}")

# Queue handing recorded chunks from the capture loop to the transcription thread.
audio_queue = queue.Queue()

def process_full_text(full_text):
    """Split transcribed text into a completed part and a pending remainder.

    Text ending in a pause marker ("...", "---", "-", "—") is likely cut off
    mid-sentence: the marker is stripped and the text is split at the last
    punctuation mark.  Everything up to and including that mark is complete;
    the rest is returned as pending text to prepend to the next chunk.

    Args:
        full_text (str): transcription for the current chunk.

    Returns:
        tuple[str, str]: (completed_text, pending_text).  pending_text is ""
        when the text does not end in a pause marker, or when no punctuation
        precedes the marker.
    """
    # Longest markers first so "..." / "---" win over a bare "-".
    trailing_chars = ('...', '---', '-', '—')

    # str.endswith accepts a tuple, replacing the previous two identical
    # scans (a has-flag loop followed by a second strip loop) with one test.
    if not full_text.endswith(trailing_chars):
        return full_text, ""

    # Strip the first matching trailing marker.
    for marker in trailing_chars:
        if full_text.endswith(marker):
            full_text = full_text[:-len(marker)]
            break

    # Position of the last punctuation mark; rfind returns -1 when absent.
    punctuation_marks = (',', '.', '...')
    last_punct_index = max(full_text.rfind(mark) for mark in punctuation_marks)

    if last_punct_index == -1:
        # No punctuation left: treat the whole stripped text as complete.
        return full_text, ""

    # Keep the punctuation with the completed part; trim the remainder.
    part1 = full_text[:last_punct_index + 1]
    part2 = full_text[last_punct_index + 1:].strip()
    return part1, part2
    
def transcribe_audio():
    """Worker thread: pull recorded chunks off audio_queue, transcribe them
    with Whisper, and print the text (translated to Chinese via DeepSeek
    when the detected language is not Chinese).

    The spoken language is re-detected after every near-silent chunk.  Text
    ending in a pause marker is held back (pending_text) and prepended to
    the next chunk's transcription.
    """
    prompt ="""
Transcribe casual speech with:
- Ellipses for pauses: "Well... maybe not."
- Stressed words in caps: "That's AWESOME!"
- Informal interjections: uh-huh, mm-mm
- Phrasal verbs: gonna, wanna
Example: "Wait... you're gonna love this! Seriously—it's AMAZING."
"""
    model = whisper.load_model(MODEL_NAME)
    deepseek_client = DeepSeekAPI()
    first = True       # forces language re-detection on the next voiced chunk
    lang = "en"
    pending_text = ""  # unfinished tail carried over from the previous chunk

    while True:
        # Block until a chunk arrives.  The previous empty()/get() polling
        # loop busy-waited and pegged a CPU core while idle.
        data = audio_queue.get()

        # Skip near-silent chunks (threshold scaled by recording duration)
        # and re-detect the language once speech resumes.
        if np.sum(np.abs(data)) < 50 / RECORD_SECONDS:
            first = True
            continue

        audio = resample_audio(data, RATE, CHANNELS)

        if first:
            # Detect the spoken language (pad/trim to Whisper's fixed input window).
            audio2 = whisper.pad_or_trim(audio.flatten())
            mel = whisper.log_mel_spectrogram(audio2.flatten(), n_mels=model.dims.n_mels).to(model.device)
            _, prob = model.detect_language(mel)
            lang = max(prob, key=lambda k: prob[k])
            print("<<切换语言>> ", lang)

        result = model.transcribe(audio.flatten(),
                                  language=lang,
                                  fp16=False,
                                  initial_prompt=prompt,
                                  word_timestamps=False)

        # Prepend any text held back from the previous chunk.
        if len(pending_text) > 0:
            full_text = pending_text + ' ' + result["text"]
        else:
            full_text = result["text"]
        pending_text = ""

        completed_text, pending_text = process_full_text(full_text)

        print(completed_text)

        # Translate non-Chinese output to Chinese via DeepSeek.
        if lang != 'zh':
            translation_prompt = f"Translate the following English text to Chinese: {completed_text}"
            for translation in deepseek_client.chat_completion(translation_prompt, stream=False):
                print(">>", translation)

        first = False

# Create and start the transcription worker thread (daemon so it is killed
# automatically when the main thread exits).
transcribe_thread = threading.Thread(target=transcribe_audio)
transcribe_thread.daemon = True
transcribe_thread.start()

try:
    print("开始录音...")
    # Capture loop: record fixed-length chunks and hand them to the
    # transcription thread via the shared queue.
    while True:
        data = sd.rec(int(RECORD_SECONDS * RATE), samplerate=RATE, channels=CHANNELS, device=DEVICE_ID)
        sd.wait()  # block until this chunk finishes recording
        audio_queue.put(data)

except KeyboardInterrupt:
    print("录音结束")
