#!/usr/bin/env python3
"""
WhisperLiveKit 实时语音转写系统 - Flask 版本
基于 Flask 和 SocketIO 的真实音频转写应用
"""

import os
import json
import logging
import time
import warnings
from datetime import datetime

# Suppress deprecation warnings emitted by PyTorch/gevent and pkg_resources
warnings.filterwarnings("ignore", category=FutureWarning, module="gevent")
warnings.filterwarnings("ignore", category=UserWarning, module="pkg_resources")
from flask import Flask, render_template, request, jsonify
from flask_socketio import SocketIO, emit, join_room, leave_room
import whisperlivekit
import numpy as np
import soundfile as sf
from pydub import AudioSegment
import io
import threading
import queue
import pyaudio

# Optional Traditional-to-Simplified Chinese conversion support; the app
# degrades gracefully (returns text unchanged) when opencc is missing.
try:
    from opencc import OpenCC
    cc = OpenCC('t2s')  # Traditional -> Simplified converter
    HAS_OPENCC = True
except ImportError:
    HAS_OPENCC = False
    print("提示: 安装 opencc-python-reimplemented 可以获得更好的繁简转换效果")

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def convert_to_simplified(text):
    """Convert text to Simplified Chinese.

    Uses OpenCC when it is installed; otherwise warns once per call and
    returns the input unchanged.
    """
    if not HAS_OPENCC:
        # No converter available -- tell the operator how to get one.
        logger.warning("请安装 opencc-python-reimplemented 以获得完整的繁简转换支持")
        logger.warning("运行: pip install opencc-python-reimplemented")
        return text
    # Full Traditional -> Simplified conversion via OpenCC.
    return cc.convert(text)

def post_process_text(text):
    """Post-process transcribed text to improve readability.

    Collapses runs of Chinese punctuation, normalizes trailing punctuation
    and whitespace, and guarantees the result ends with a sentence mark.
    """
    import re

    # Runs of two or more punctuation marks become a single full stop;
    # any trailing punctuation is normalized to a full stop as well.
    cleaned = re.sub(r'[，。！？、；：]{2,}', '。', text)
    cleaned = re.sub(r'[，。！？、；：]+$', '。', cleaned)

    # Squash whitespace runs and trim the ends.
    cleaned = re.sub(r'\s+', ' ', cleaned).strip()

    # Non-empty text must end with a terminal punctuation mark.
    if cleaned and cleaned[-1] not in '。！？':
        cleaned += '。'

    return cleaned

# Create the Flask application
app = Flask(__name__)
app.config['SECRET_KEY'] = 'whisperlivekit-secret-key-2025'

# Create the SocketIO instance (CORS open to all origins)
socketio = SocketIO(app, cors_allowed_origins="*")

# Active connections and transcription sessions, keyed by SocketIO sid
active_connections = {}
transcription_sessions = {}

# Microphone capture configuration
AUDIO_CONFIG = {
    'chunk': 4096,  # frames per buffer (raised from 1024 to 4096)
    'format': pyaudio.paFloat32,
    'channels': 1,
    'rate': 16000,
    'record_seconds': 3  # nominal seconds per capture round
}

class AudioRecorder:
    """Microphone recorder that buffers audio and emits utterance-sized chunks.

    A daemon thread reads float32 frames from PyAudio, accumulates them in
    ``audio_buffer``, and pushes a chunk onto ``audio_queue`` once at least
    ``buffer_duration`` seconds have been collected AND ``silence_duration``
    seconds have passed since the last detected speech.

    ``audio_buffer`` is shared between the recording thread and
    ``force_transcribe()`` (called from SocketIO handler threads), so all
    access to it is guarded by ``_buffer_lock``.
    """
    def __init__(self):
        self.audio = pyaudio.PyAudio()
        self.is_recording = False
        self.audio_queue = queue.Queue()
        self.recording_thread = None
        self.audio_buffer = []          # accumulated float32 samples
        self.buffer_duration = 8.0      # seconds to accumulate so the user can finish speaking
        self.sample_rate = 16000
        self.silence_threshold = 0.01   # mean |amplitude| below this counts as silence
        self.silence_duration = 1.5     # seconds of silence that end an utterance
        self.last_speech_time = 0       # timestamp of the last detected speech
        # Protects audio_buffer against concurrent mutation by the recording
        # thread (_record_audio) and request handlers (force_transcribe).
        self._buffer_lock = threading.Lock()

    def start_recording(self):
        """Start the capture thread (no-op if already recording)."""
        if self.is_recording:
            return

        self.is_recording = True
        self.recording_thread = threading.Thread(target=self._record_audio)
        self.recording_thread.daemon = True
        self.recording_thread.start()
        logger.info("音频录制已开始")

    def stop_recording(self):
        """Signal the capture thread to stop and wait for it to finish."""
        self.is_recording = False
        if self.recording_thread:
            self.recording_thread.join()
            self.recording_thread = None
        logger.info("音频录制已停止")

    def _record_audio(self):
        """Capture loop run on the recording thread.

        Reads chunks from the input stream, tracks voice activity, and flushes
        ``buffer_duration`` worth of samples to ``audio_queue`` when silence
        indicates the speaker has finished.
        """
        try:
            stream = self.audio.open(
                format=AUDIO_CONFIG['format'],
                channels=AUDIO_CONFIG['channels'],
                rate=AUDIO_CONFIG['rate'],
                input=True,
                frames_per_buffer=AUDIO_CONFIG['chunk']
            )

            logger.info("麦克风已激活，开始录制...")

            while self.is_recording:
                try:
                    # exception_on_overflow=False: drop overflowed input
                    # instead of raising and killing the capture loop when
                    # the consumer momentarily falls behind.
                    data = stream.read(AUDIO_CONFIG['chunk'],
                                       exception_on_overflow=False)
                    audio_data = np.frombuffer(data, dtype=np.float32)

                    # Simple energy-based voice activity detection.
                    current_time = time.time()
                    audio_level = np.abs(audio_data).mean()
                    if audio_level > self.silence_threshold:
                        self.last_speech_time = current_time

                    buffer_samples = int(self.buffer_duration * self.sample_rate)
                    with self._buffer_lock:
                        # Accumulate, then flush once we have enough audio
                        # and the speaker has been silent long enough.
                        self.audio_buffer.extend(audio_data)
                        if (len(self.audio_buffer) >= buffer_samples and
                                current_time - self.last_speech_time >= self.silence_duration):
                            accumulated = np.array(self.audio_buffer[:buffer_samples],
                                                   dtype=np.float32)
                            self.audio_queue.put(accumulated)
                            # Reset the buffer for the next utterance.
                            self.audio_buffer = []
                            self.last_speech_time = current_time

                except Exception as e:
                    logger.error(f"音频录制错误: {e}")
                    break

            stream.stop_stream()
            stream.close()

        except Exception as e:
            logger.error(f"音频流初始化失败: {e}")

    def get_audio_chunk(self):
        """Return the next queued audio chunk, or None if the queue is empty."""
        try:
            return self.audio_queue.get_nowait()
        except queue.Empty:
            return None

    def force_transcribe(self):
        """Flush whatever is currently buffered to the transcription queue.

        Returns True if audio was queued, False when the buffer was empty.
        """
        with self._buffer_lock:
            if not self.audio_buffer:
                return False
            accumulated = np.array(self.audio_buffer, dtype=np.float32)
            self.audio_buffer = []
        self.audio_queue.put(accumulated)
        self.last_speech_time = time.time()
        logger.info("手动触发转写")
        return True

class WhisperLiveKitApp:
    """Transcription engine wrapper.

    Loads a local faster-whisper model when available; otherwise falls back
    to a simulated mode so the web app stays usable. Also owns the
    AudioRecorder that feeds the real-time transcription loop.
    """
    def __init__(self):
        self.whisper_model = None
        self.is_initialized = False
        self.model_size = "base"  # informational only; actual load uses a local path below
        self.language = "zh"      # default language: Chinese
        self.audio_recorder = AudioRecorder()
        self.init_whisper()
    
    def init_whisper(self):
        """Initialize the Whisper model.

        Attempts to load a local faster-whisper model. On ImportError or any
        other failure, is_initialized is still set True so the app runs in
        simulated-transcription mode instead of failing.
        """
        try:
            logger.info("正在初始化 WhisperLiveKit...")
            
            # Try to load a real model
            try:
                # faster-whisper is used as the backend
                from faster_whisper import WhisperModel
                
                logger.info("尝试加载本地模型...")
                
                # Load the model directly from a local directory
                self.whisper_model = WhisperModel(
                    model_size_or_path="./models/faster-whisper-base",  # local model path
                    device="cpu",  # change to "cuda" if a GPU is available
                    compute_type="int8",  # int8 quantization reduces memory use
                    cpu_threads=4,  # CPU thread count
                    num_workers=1   # worker thread count
                )
                logger.info("本地模型加载成功")
                self.is_initialized = True
                
            except ImportError:
                logger.warning("faster-whisper 未安装，使用模拟模式")
                self.is_initialized = True  # simulated mode
                
        except Exception as e:
            logger.error(f"WhisperLiveKit 初始化失败: {e}")
            logger.info("切换到模拟模式，转写功能仍然可用")
            self.is_initialized = True  # enable simulated mode even on failure
    
    def transcribe_audio(self, audio_data, sample_rate=16000):
        """Transcribe audio samples and return a list of result dicts.

        audio_data is expected to be mono float32 samples (presumably 16 kHz;
        NOTE(review): the sample_rate argument is accepted but never forwarded
        to the model -- confirm callers always pass 16 kHz audio).
        Falls back to simulated results if the model is missing or raises.
        """
        try:
            if not self.is_initialized or self.whisper_model is None:
                # No real model available: return simulated results
                return self._simulate_transcription(audio_data)
            
            # Transcribe with the real model
            segments, info = self.whisper_model.transcribe(
                audio_data,
                language="zh",  # force Chinese
                beam_size=5,           # wider beam search
                best_of=5,             # pick the best candidate
                temperature=0.0,       # deterministic output
                compression_ratio_threshold=2.4,  # compression-ratio cutoff
                log_prob_threshold=-1.0,         # log-probability cutoff
                no_speech_threshold=0.6,         # no-speech probability cutoff
                condition_on_previous_text=True,  # condition on prior text
                initial_prompt="这是一段中文语音",  # prompt biasing decoding toward Chinese
                vad_filter=True,
                vad_parameters=dict(
                    min_silence_duration_ms=500,
                    speech_pad_ms=400,
                    threshold=0.5
                )
            )
            
            # Convert segments into result dicts
            transcription_results = []
            for segment in segments:
                text = segment.text.strip()
                if text:
                    # Convert to Simplified Chinese
                    simplified_text = convert_to_simplified(text)
                    # Clean up punctuation/whitespace
                    processed_text = post_process_text(simplified_text)
                    
                    # Drop low-confidence / likely-silent segments
                    if segment.avg_logprob > -1.5 and segment.no_speech_prob < 0.8:
                        result = {
                            "text": processed_text,
                            "original_text": text,
                            "simplified_text": simplified_text,
                            "confidence": segment.avg_logprob,
                            "no_speech_prob": segment.no_speech_prob,
                            "speaker": f"Speaker_{len(transcription_results) % 3 + 1}",  # cycles 1-3; not real diarization
                            "timestamp": datetime.now().isoformat(),
                            "start": segment.start,
                            "end": segment.end,
                            "is_final": True
                        }
                        transcription_results.append(result)
                    else:
                        logger.warning(f"过滤低质量片段: {text} (置信度: {segment.avg_logprob:.3f})")
            
            return transcription_results
            
        except Exception as e:
            logger.error(f"音频转写失败: {e}")
            return self._simulate_transcription(audio_data)
    
    def _simulate_transcription(self, audio_data):
        """Produce a fake transcription result (used when no real model is available)."""
        # Derive a duration from the audio length when possible
        audio_length = len(audio_data) if hasattr(audio_data, '__len__') else 1000
        
        # Canned demo sentences
        sample_texts = [
            "欢迎使用 WhisperLiveKit 实时语音转写系统",
            "这是一个基于 Flask 的简化版本",
            "支持实时语音转写和说话人分离功能",
            "系统运行正常，转写质量良好",
            "当前使用模拟模式，请安装 faster-whisper 获得真实转写"
        ]
        
        # Pick one at random
        import random
        selected_text = random.choice(sample_texts)
        
        return [{
            "text": selected_text,
            "confidence": random.uniform(0.85, 0.98),
            "speaker": f"Speaker_{random.randint(1, 3)}",
            "timestamp": datetime.now().isoformat(),
            "start": 0.0,
            "end": audio_length / 16000.0,  # samples / sample-rate => seconds
            "is_final": True
        }]
    
    def start_real_time_transcription(self, client_id):
        """Start microphone capture for real-time transcription. Returns success flag."""
        try:
            self.audio_recorder.start_recording()
            logger.info(f"客户端 {client_id} 开始实时转写")
            return True
        except Exception as e:
            logger.error(f"启动实时转写失败: {e}")
            return False
    
    def stop_real_time_transcription(self, client_id):
        """Stop microphone capture. Returns success flag."""
        try:
            self.audio_recorder.stop_recording()
            logger.info(f"客户端 {client_id} 停止实时转写")
            return True
        except Exception as e:
            logger.error(f"停止实时转写失败: {e}")
            return False
    
    def process_real_time_audio(self, client_id):
        """Pull one queued audio chunk and transcribe it.

        Returns a list of result dicts, or None when no audio is pending
        or an error occurred.
        """
        try:
            audio_chunk = self.audio_recorder.get_audio_chunk()
            if audio_chunk is not None and len(audio_chunk) > 0:
                logger.info(f"收到音频数据: {len(audio_chunk)} 采样点")
                
                # Transcribe the chunk
                results = self.transcribe_audio(audio_chunk)
                if results:
                    logger.info(f"转写结果: {results}")
                else:
                    logger.info("转写结果为空")
                return results
            return None
        except Exception as e:
            logger.error(f"处理实时音频失败: {e}")
            return None

# Create the singleton application instance (loads the model at import time)
whisper_app = WhisperLiveKitApp()

# Flask 路由
@app.route('/')
def index():
    """Serve the main page of the web UI."""
    page = render_template('index.html')
    return page

@app.route('/health')
def health_check():
    """Health-check endpoint reporting service, model, and mode status."""
    payload = {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "version": "0.2.6",
        "whisper_initialized": whisper_app.is_initialized,
        "model_size": whisper_app.model_size,
        "language": whisper_app.language,
        "audio_mode": "real_time"  # reported mode: live microphone capture
    }
    return jsonify(payload)

@app.route('/api/status')
def get_status():
    """Return system status: connections, sessions, model info, mic state."""
    model_info = {
        "size": whisper_app.model_size,
        "language": whisper_app.language,
        "device": "cpu"  # adjust if running on GPU
    }
    return jsonify({
        "whisper_initialized": whisper_app.is_initialized,
        "active_connections": len(active_connections),
        "active_sessions": len(transcription_sessions),
        "model_info": model_info,
        "audio_mode": "real_time",
        "microphone_status": whisper_app.audio_recorder.is_recording
    })

@app.route('/api/transcribe', methods=['POST'])
def transcribe_audio_file():
    """Transcribe an uploaded audio file and return results as JSON.

    Decodes the upload with pydub, resamples to 16 kHz mono float32,
    then runs it through the transcription engine.
    """
    try:
        if 'audio' not in request.files:
            return jsonify({'error': '没有音频文件'}), 400

        upload = request.files['audio']
        if upload.filename == '':
            return jsonify({'error': '没有选择文件'}), 400

        raw_bytes = upload.read()

        try:
            # Decode any supported container via pydub, normalize to 16 kHz mono.
            segment = AudioSegment.from_file(io.BytesIO(raw_bytes))
            segment = segment.set_frame_rate(16000).set_channels(1)

            # Map PCM integers to float32; divisor depends on sample width
            # (16-bit, 32-bit, otherwise assumed 8-bit).
            scale = {2: 32768.0, 4: 2147483648.0}.get(segment.sample_width, 128.0)
            samples = np.array(segment.get_array_of_samples()).astype(np.float32) / scale

            # Run transcription on the normalized samples.
            results = whisper_app.transcribe_audio(samples, 16000)

            return jsonify({
                'success': True,
                'transcriptions': results,
                'audio_info': {
                    'duration': len(segment) / 1000.0,
                    'sample_rate': 16000,
                    'channels': 1
                }
            })

        except Exception as e:
            logger.error(f"音频处理失败: {e}")
            return jsonify({'error': f'音频处理失败: {str(e)}'}), 500

    except Exception as e:
        logger.error(f"转写请求失败: {e}")
        return jsonify({'error': f'转写失败: {str(e)}'}), 500

# SocketIO 事件处理
@socketio.on('connect')
def handle_connect():
    """Register a new SocketIO client and acknowledge the connection."""
    client_id = request.sid

    # Track the connection by session id.
    active_connections[client_id] = {
        'connected_at': datetime.now().isoformat(),
        'status': 'connected'
    }
    logger.info(f"客户端 {client_id} 已连接")

    # Send the handshake payload back to the client.
    handshake = {
        'client_id': client_id,
        'timestamp': datetime.now().isoformat(),
        'whisper_status': whisper_app.is_initialized,
        'model_info': {
            'size': whisper_app.model_size,
            'language': whisper_app.language
        },
        'audio_mode': 'real_time'
    }
    emit('connection_established', handshake)

@socketio.on('disconnect')
def handle_disconnect():
    """Drop all state associated with a client that disconnected."""
    client_id = request.sid

    # Remove connection and session records if present.
    active_connections.pop(client_id, None)
    transcription_sessions.pop(client_id, None)

    logger.info(f"客户端 {client_id} 断开连接")

@socketio.on('start_transcription')
def handle_start_transcription(data):
    """Start real-time transcription for the calling client.

    Creates (or resets) the client's session record, starts microphone
    capture, then spawns a SocketIO background task that polls for
    transcription results once a second until the session leaves the
    'transcribing' state or is removed.
    """
    client_id = request.sid
    
    # Initialize (or overwrite) this client's session record
    transcription_sessions[client_id] = {
        'start_time': datetime.now().isoformat(),
        'status': 'transcribing',
        'transcriptions': []
    }
    
    # Start live microphone capture
    if whisper_app.start_real_time_transcription(client_id):
        logger.info(f"客户端 {client_id} 开始实时转写")
        
        # Acknowledge to the client
        emit('transcription_started', {
            'timestamp': datetime.now().isoformat(),
            'mode': 'real_time',
            'message': '麦克风已激活，开始说话...'
        })
        
        # Polling loop executed as a background task; exits when the session
        # is stopped or deleted
        def real_time_transcription():
            while client_id in transcription_sessions and transcription_sessions[client_id]['status'] == 'transcribing':
                try:
                    results = whisper_app.process_real_time_audio(client_id)
                    if results:
                        for result in results:
                            # Persist into the session record (re-check: the
                            # session may have been removed mid-iteration)
                            if client_id in transcription_sessions:
                                transcription_sessions[client_id]['transcriptions'].append(result)
                            # Must use socketio.emit, not emit: there is no
                            # request context inside a background task
                            socketio.emit('transcription_result', result, room=client_id)
                    
                    socketio.sleep(1)  # poll once per second
                except Exception as e:
                    logger.error(f"实时转写错误: {e}")
                    break
        
        # Launch the background polling task
        socketio.start_background_task(real_time_transcription)
    else:
        emit('error', {'message': '启动实时转写失败'})

@socketio.on('stop_transcription')
def handle_stop_transcription(data):
    """Stop the caller's transcription session and shut down the microphone."""
    client_id = request.sid

    # Flip the session status first so the background polling loop exits.
    session = transcription_sessions.get(client_id)
    if session is not None:
        session['status'] = 'stopped'
        session['end_time'] = datetime.now().isoformat()

    # Then stop microphone capture.
    whisper_app.stop_real_time_transcription(client_id)

    logger.info(f"客户端 {client_id} 停止转写")

    # Acknowledge to the client.
    emit('transcription_stopped', {
        'timestamp': datetime.now().isoformat(),
        'message': '转写已停止，麦克风已关闭'
    })

@socketio.on('audio_data')
def handle_audio_data(data):
    """Process audio data (kept for backward compatibility).

    Note: the incoming payload is ignored; results come from the
    server-side recorder queue instead.
    """
    client_id = request.sid

    try:
        results = whisper_app.process_real_time_audio(client_id)
        if not results:
            return

        # Persist into the session if one exists.
        session = transcription_sessions.get(client_id)
        if session is not None:
            session['transcriptions'].extend(results)

        # Push each result to the client.
        for result in results:
            emit('transcription_result', result)

    except Exception as e:
        logger.error(f"处理音频数据时出错: {e}")
        emit('error', {'message': str(e)})

@socketio.on('clear_transcriptions')
def handle_clear_transcriptions(data):
    """Empty the stored transcription list for the caller's session."""
    session = transcription_sessions.get(request.sid)
    if session is not None:
        session['transcriptions'] = []

    emit('transcriptions_cleared', {
        'timestamp': datetime.now().isoformat()
    })

@socketio.on('get_session_info')
def handle_get_session_info(data):
    """Report the caller's session status and transcription count."""
    session = transcription_sessions.get(request.sid)

    if session is None:
        # No active session for this client.
        emit('session_info', {
            'status': 'no_session',
            'transcription_count': 0,
            'mode': 'real_time'
        })
        return

    emit('session_info', {
        'status': session['status'],
        'transcription_count': len(session['transcriptions']),
        'start_time': session.get('start_time'),
        'end_time': session.get('end_time'),
        'mode': 'real_time'
    })

@socketio.on('force_transcribe')
def handle_force_transcribe(data):
    """Manually flush the recorder buffer to transcription on client request."""
    client_id = request.sid

    session = transcription_sessions.get(client_id)
    if session is None or session['status'] != 'transcribing':
        emit('error', {'message': '转写会话未激活'})
        return

    # Flush whatever audio is buffered right now.
    if not whisper_app.audio_recorder.force_transcribe():
        emit('error', {'message': '没有音频数据可转写'})
        return

    emit('transcription_triggered', {
        'timestamp': datetime.now().isoformat(),
        'message': '手动触发转写成功'
    })

# 错误处理
@app.errorhandler(404)
def not_found(error):
    """Return a JSON body for unknown routes."""
    body = jsonify({'error': 'Not found'})
    return body, 404

@app.errorhandler(500)
def internal_error(error):
    """Return a JSON body for unhandled server errors."""
    body = jsonify({'error': 'Internal server error'})
    return body, 500

def create_directories():
    """Ensure the directories the app relies on exist (idempotent)."""
    for directory in ('temp', 'logs', 'models', 'uploads', 'templates', 'static'):
        # exist_ok makes repeated startups harmless.
        os.makedirs(directory, exist_ok=True)
        logger.info(f"创建目录: {directory}")

if __name__ == '__main__':
    # Ensure required directories exist
    create_directories()
    
    # Start the application
    logger.info("启动 WhisperLiveKit Flask 应用 (真实模式)...")
    logger.info("本地访问地址: http://localhost:5000")
    logger.info("内网访问地址: http://192.168.43.68:5000")
    logger.info("💡 提示: 现在支持实时麦克风输入转写！")
    logger.info("🌐 其他电脑可通过内网IP访问此应用")
    
    # Run with the development server (debug mode; not suitable for production)
    socketio.run(app, 
                host='0.0.0.0', 
                port=5000, 
                debug=True, 
                allow_unsafe_werkzeug=True)
