/*******************************************************************************
 * Copyright (c) 2007 - 2025 Maxprograms.
 *
 * This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License 1.0
 * which accompanies this distribution, and is available at
 * https://www.eclipse.org/org/documents/epl-v10.html
 *
 * Contributors:
 *     Maxprograms - initial API and implementation
 *******************************************************************************/

/**
 * Connection settings for the Doubao (ByteDance) streaming ASR service.
 */
interface DoubaoConfig {
    url: string;                  // WebSocket endpoint of the ASR service
    appKey: string;               // application key issued for the Doubao service
    accessKey: string;            // access key used to authenticate the connection
    resourceId: string;           // resource/model identifier sent with the connection
    selectedMicrophoneId: string; // MediaDevices deviceId of the chosen microphone ('' = system default)
}

/**
 * Audio capture parameters advertised to the ASR server.
 */
interface AudioConfig {
    format: string;     // payload format name (e.g. 'pcm')
    sampleRate: number; // samples per second (Hz)
    bits: number;       // bits per sample
    channel: number;    // number of audio channels
}

/**
 * A single recognition update delivered through the onResult callback.
 */
interface RecognitionResult {
    text: string;     // recognized text
    final: boolean;   // true when this result is final for the utterance
    sequence: number; // protocol sequence number this result corresponds to
}

/**
 * Streaming speech-recognition client for the Doubao (ByteDance) ASR service.
 * The WebSocket itself is created and owned by the Electron main process; this
 * renderer-side class captures microphone audio with MediaRecorder and
 * exchanges binary protocol frames with the main process over IPC.
 */
class VoiceRecognition {
    private ws: WebSocket | null = null; // NOTE(review): never assigned in this chunk — the socket lives in the main process; confirm whether this field is still needed
    private wsConnectionId: string | null = null; // id of the IPC-backed WebSocket connection (UUID per session)
    private mediaRecorder: MediaRecorder | null = null; // captures microphone audio in timed chunks
    private currentStream: MediaStream | null = null; // active microphone stream, kept for later cleanup
    private audioChunks: Blob[] = []; // raw chunks collected from MediaRecorder
    private isRecording: boolean = false; // true only while a session has fully started
    private hasInitialRequestSent: boolean = false; // tracks whether the initial full-client request has been sent
    private sequence: number = 1; // protocol sequence number; starts at 1
    private onResult: ((result: RecognitionResult) => void) | null = null; // recognition-result callback
    private onError: ((error: string) => void) | null = null; // error callback
    private onStatusChange: ((status: 'connecting' | 'connected' | 'recording' | 'processing' | 'disconnected' | 'error') => void) | null = null; // lifecycle-status callback
    private inputElement?: HTMLElement; // element associated with the recording session, if any

    // Protocol constants (header fields of the Doubao binary WebSocket protocol)
    private readonly PROTOCOL_VERSION = 0b0001;
    private readonly DEFAULT_HEADER_SIZE = 0b0001;
    private readonly FULL_CLIENT_REQUEST = 0b0001;  // message type: initial JSON request
    private readonly AUDIO_ONLY_REQUEST = 0b0010;   // message type: audio payload frame
    private readonly FULL_SERVER_RESPONSE = 0b1001; // message type: server result frame
    private readonly SERVER_ERROR_RESPONSE = 0b1111; // message type: server error frame
    private readonly POS_SEQUENCE = 0b0001;         // flags: positive sequence number follows header
    private readonly NEG_WITH_SEQUENCE = 0b0011;    // flags: negative (final) sequence number follows header
    private readonly JSON_SERIALIZATION = 0b0001;   // payload serialization: JSON
    private readonly GZIP_COMPRESSION = 0b0001;     // payload compression: gzip

    // Shared AudioContext, used for audio-level monitoring
    private audioContext: AudioContext | null = null;

    private config: DoubaoConfig = {
        url: 'wss://openspeech.bytedance.com/api/v3/sauc/bigmodel',
        appKey: '',
        accessKey: '',
        resourceId: 'volc.bigasr.sauc.duration',
        selectedMicrophoneId: ''
    };

    private audioConfig: AudioConfig = {
        format: 'pcm', // NOTE(review): sendFullClientRequest advertises 'opus' to the server — confirm which format is actually intended
        sampleRate: 16000,
        bits: 16,
        channel: 1
    };

    constructor() {
        // initializeConfig is async and constructors cannot await; attach a
        // catch handler so a failure (e.g. require('electron') throwing) is
        // logged instead of becoming an unhandled promise rejection.
        this.initializeConfig().catch((error) => {
            console.error('初始化豆包配置失败:', error);
        });
    }

    private async initializeConfig(): Promise<void> {
        // Delegate to loadConfig; returning its promise propagates both
        // completion and rejection to any awaiting caller.
        return this.loadConfig();
    }

    public async loadConfig(): Promise<void> {
        // 从设置中加载豆包配置
        const electron = require('electron');
        try {
            const config = await electron.ipcRenderer.invoke('get-doubao-config');
            if (config) {
                this.config.appKey = config.appKey || '';
                this.config.accessKey = config.accessKey || '';
                this.config.url = config.url || 'wss://openspeech.bytedance.com/api/v3/sauc/bigmodel';
                this.config.resourceId = config.resourceId || 'volc.bigasr.sauc.duration';
                this.config.selectedMicrophoneId = config.selectedMicrophoneId || '';
                console.log('豆包配置加载成功:', {
                    appKey: this.config.appKey ? '已设置' : '未设置',
                    accessKey: this.config.accessKey ? '已设置' : '未设置',
                    selectedMicrophoneId: this.config.selectedMicrophoneId ? this.config.selectedMicrophoneId.substring(0, 8) + '...' : '使用默认麦克风'
                });
            } else {
                console.log('未找到豆包配置');
            }
        } catch (error) {
            console.error('加载豆包配置失败:', error);
        }
    }

    public setConfig(config: Partial<DoubaoConfig>): void {
        // Merge the provided overrides into the current configuration;
        // fields not present in `config` keep their existing values.
        Object.assign(this.config, config);
    }

    public setCallbacks(callbacks: {
        onResult?: (result: RecognitionResult) => void;
        onError?: (error: string) => void;
        onStatusChange?: (status: 'connecting' | 'connected' | 'recording' | 'processing' | 'disconnected' | 'error') => void;
    }): void {
        // Register observer callbacks; any omitted entry clears the
        // previously registered handler.
        const { onResult, onError, onStatusChange } = callbacks;
        this.onResult = onResult ?? null;
        this.onError = onError ?? null;
        this.onStatusChange = onStatusChange ?? null;
    }

    public async startRecording(inputElement?: HTMLElement): Promise<void> {
        console.log('🎤 [录音控制] startRecording被调用');
        
        if (this.isRecording) {
            console.log('🎤 [录音控制] 已经在录音，忽略调用');
            return;
        }

        this.inputElement = inputElement;
        this.isRecording = false; // 先设置为false，成功后再设置为true
        this.sequence = 1; // 从1开始，用于sendFullClientRequest
        this.hasInitialRequestSent = false; // 重置初始请求标志
        this.audioChunks = [];
        console.log('🎤 [录音控制] 状态已重置: sequence=1, hasInitialRequestSent=false');

        try {
            // 检查麦克风权限
            console.log('🎤 [录音控制] 检查麦克风权限...');
            if (typeof navigator !== 'undefined' && navigator.permissions) {
                const permission = await navigator.permissions.query({ name: 'microphone' as PermissionName });
                console.log('🎤 [录音控制] 麦克风权限状态:', permission.state);
            } else {
                const electron = require('electron');
                const permissionResult = await electron.ipcRenderer.invoke('request-microphone-permission');
                if (!permissionResult.success) {
                    throw new Error(permissionResult.error);
                }
            }

            console.log('🎤 [录音控制] 连接WebSocket...');
            this.updateStatus('connecting');
            await this.connectWebSocket();
            
            console.log('🎤 [录音控制] 初始化音频录制...');
            console.log('🎤 [录音控制] 🔥🔥🔥 即将调用initializeAudioRecording');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [录音控制] 🔥🔥🔥 开始初始化音频录制');
            } catch (e) {}
            
            await this.initializeAudioRecording();
            console.log('🎤 [录音控制] 🔥🔥🔥 initializeAudioRecording调用完成');
            
            console.log('🎤 [录音控制] 🔥🔥🔥 initializeAudioRecording完成');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [录音控制] 🔥🔥🔥 音频录制初始化完成');
            } catch (e) {}
            
            console.log('🎤 [录音控制] 录音启动成功');
            this.updateStatus('recording');
            this.isRecording = true;
        } catch (error) {
            console.error('🎤 [录音控制] 录音启动失败:', error);
            
            // 强制输出错误到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [录音控制] 录音启动失败: ' + (error as any).message);
            } catch (e) {}
            
            this.updateStatus('error');
            throw error;
        }
    }

    public stopRecording(): void {
        // Stop the active recording session and schedule the final protocol packet.
        console.log(`[录音控制] stopRecording被调用, 当前状态: isRecording=${this.isRecording}`);

        // Nothing to do if no session is running.
        if (!this.isRecording) {
            console.log('[录音控制] 已经停止录音，忽略调用');
            return;
        }

        console.log('[录音控制] 开始停止录音流程');
        this.isRecording = false;
        this.updateStatus('processing');

        const recorder = this.mediaRecorder;
        if (recorder !== null && recorder.state === 'recording') {
            console.log('[录音控制] 停止MediaRecorder');
            recorder.stop();
        }

        // After a short delay (letting the recorder flush), send the final
        // audio packet and reset the flag so the next session re-sends its
        // initial request.
        setTimeout(() => {
            console.log('[录音控制] 发送最终音频包');
            this.sendFinalAudioPacket();
            this.hasInitialRequestSent = false;
            console.log('[录音控制] 初始请求标志已重置');
        }, 500);
    }

    /**
     * Establishes the ASR WebSocket through the Electron main process (which
     * can attach the custom auth headers the renderer cannot) and wires the
     * per-connection IPC channels for messages, close, and errors.
     *
     * @throws Error when the main process reports a connection failure
     */
    private async connectWebSocket(): Promise<void> {
        const electron = require('electron');
        const connectId = this.generateUUID();

        // fix: each connection uses a fresh UUID channel and the previous code
        // never removed its listeners, so every (re)connection leaked three
        // ipcRenderer listeners. Drop them when the connection closes.
        const removeIpcListeners = () => {
            electron.ipcRenderer.removeAllListeners(`doubao-ws-message-${connectId}`);
            electron.ipcRenderer.removeAllListeners(`doubao-ws-close-${connectId}`);
            electron.ipcRenderer.removeAllListeners(`doubao-ws-error-${connectId}`);
        };

        try {
            const connectionResult = await electron.ipcRenderer.invoke('create-doubao-websocket', {
                url: this.config.url,
                appKey: this.config.appKey,
                accessKey: this.config.accessKey,
                resourceId: this.config.resourceId,
                connectId: connectId
            });

            if (!connectionResult.success) {
                throw new Error(connectionResult.error || 'WebSocket连接失败');
            }

            console.log('豆包语音识别WebSocket连接已建立');
            this.updateStatus('connected');
            this.wsConnectionId = connectId;

            // Binary frames from the server, forwarded by the main process.
            electron.ipcRenderer.on(`doubao-ws-message-${connectId}`, (event: any, data: ArrayBuffer) => {
                this.handleBinaryMessage(new Uint8Array(data));
            });

            // Close: code 1000 + 'finish last sequence' marks a normal session end.
            electron.ipcRenderer.on(`doubao-ws-close-${connectId}`, (event: any, { code, reason }: { code: number, reason: string }) => {
                console.log('WebSocket连接已关闭:', { code, reason });
                removeIpcListeners();

                if (code === 1000 && reason === 'finish last sequence') {
                    console.log('正常会话完成');
                    this.updateStatus('disconnected');
                } else {
                    this.updateStatus('error');
                    this.notifyError(`连接异常关闭: code ${code}, reason: ${reason}`);
                }
            });

            electron.ipcRenderer.on(`doubao-ws-error-${connectId}`, (event: any, error: string) => {
                console.error('WebSocket错误:', error);
                this.updateStatus('error');
                this.notifyError('WebSocket连接失败: ' + error);
            });

            // The initial full-client request is deferred until recording starts.
            console.log('🎤 [协议] WebSocket连接建立成功，等待录音开始');

        } catch (error: any) {
            console.error('创建WebSocket连接失败:', error);
            this.updateStatus('error');
            throw new Error('无法连接到豆包语音识别服务: ' + error.message);
        }
    }

    private async initializeAudioRecording(): Promise<void> {
        console.log('🎤 [音频初始化] 🔥🔥🔥 initializeAudioRecording方法开始执行');
        
        // 强制输出到主进程
        try {
            const electron = require('electron');
            electron.ipcRenderer.send('log-to-main', '🎤 [音频初始化] 🔥🔥🔥 进入音频初始化方法');
        } catch (e) {}
        
        console.log('🎤 [调试] 检查navigator对象...');
        console.log('🎤 [调试] navigator存在:', typeof navigator !== 'undefined');
        console.log('🎤 [调试] navigator.mediaDevices存在:', typeof navigator !== 'undefined' && typeof navigator.mediaDevices !== 'undefined');
        
        // 强制输出到主进程
        try {
            const electron = require('electron');
            electron.ipcRenderer.send('log-to-main', '🎤 [调试] 检查navigator对象...');
            electron.ipcRenderer.send('log-to-main', `🎤 [调试] navigator存在: ${typeof navigator !== 'undefined'}`);
            electron.ipcRenderer.send('log-to-main', `🎤 [调试] navigator.mediaDevices存在: ${typeof navigator !== 'undefined' && typeof navigator.mediaDevices !== 'undefined'}`);
        } catch (e) {}
        
        try {
            // 首先检查浏览器是否支持getUserMedia
            if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
                console.log('🎤 [错误] 浏览器不支持getUserMedia');
                throw new Error('您的浏览器不支持音频录制功能');
            }

            console.log('🎤 [调试] getUserMedia支持检查通过');
            console.log('🎤 [音频初始化] 请求麦克风权限...');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [调试] getUserMedia支持检查通过');
                electron.ipcRenderer.send('log-to-main', '🎤 [音频初始化] 请求麦克风权限...');
            } catch (e) {}
            
            console.log('🎤 [调试] 开始获取设备列表...');
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [调试] 开始获取设备列表...');
            } catch (e) {}
            
            let devices;
            try {
                console.log('🎤 [调试] 调用enumerateDevices...');
                devices = await navigator.mediaDevices.enumerateDevices();
                console.log('🎤 [调试] enumerateDevices调用完成');
            } catch (error) {
                console.error('🎤 [错误] enumerateDevices失败:', error);
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', '🎤 [错误] enumerateDevices失败: ' + error);
                } catch (e) {}
                throw error;
            }
            
            console.log('🎤 [调试] 设备列表获取成功，总设备数:', devices.length);
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [调试] 设备列表获取成功，总设备数: ${devices.length}`);
            } catch (e) {}
            
            const audioInputs = devices.filter(device => device.kind === 'audioinput');
            console.log('🎤 [设备检测] 可用麦克风设备:', audioInputs.length, '个');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [设备检测] 可用麦克风设备: ${audioInputs.length} 个`);
            } catch (e) {}
            
            // 详细显示每个麦克风设备
            audioInputs.forEach((device, index) => {
                const isSelected = this.config.selectedMicrophoneId === device.deviceId;
                const deviceInfo = `🎤 [设备${index + 1}] ${isSelected ? '★当前选中★' : ''} 
                    ID: ${device.deviceId}
                    名称: ${device.label || '默认麦克风'}
                    组ID: ${device.groupId}`;
                console.log(deviceInfo);
                
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', `🎤 [设备${index + 1}] ${device.label || '默认麦克风'} (ID: ${device.deviceId.substring(0, 8)}...)`);
                } catch (e) {}
            });
            
            // 使用用户选择的麦克风或默认麦克风
            const audioConstraints: MediaStreamConstraints['audio'] = {
                sampleRate: this.audioConfig.sampleRate,
                channelCount: this.audioConfig.channel,
                echoCancellation: true,
                noiseSuppression: true,
                autoGainControl: true
            };
            
            // 如果用户选择了特定的麦克风设备
            if (this.config.selectedMicrophoneId) {
                console.log('使用指定的麦克风设备:', this.config.selectedMicrophoneId.substring(0, 8) + '...');
                (audioConstraints as any).deviceId = { exact: this.config.selectedMicrophoneId };
            } else {
                console.log('使用默认麦克风设备');
            }
            
            console.log('🎤 [调试] 即将调用getUserMedia，约束条件:', JSON.stringify(audioConstraints, null, 2));
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [调试] 即将调用getUserMedia...');
            } catch (e) {}
            
            let stream;
            try {
                console.log('🎤 [调试] 执行getUserMedia调用...');
                stream = await navigator.mediaDevices.getUserMedia({
                    audio: audioConstraints
                });
                console.log('🎤 [调试] getUserMedia调用成功');
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', '🎤 [调试] getUserMedia调用成功');
                } catch (e) {}
                
                console.log('🎤 [调试] 即将处理音频流...');
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', '🎤 [调试] 即将处理音频流...');
                } catch (e) {}
            } catch (error) {
                console.error('🎤 [错误] getUserMedia失败:', error);
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', '🎤 [错误] getUserMedia失败: ' + error);
                } catch (e) {}
                throw error;
            }
            
            console.log('🎤 [权限] 麦克风权限获取成功');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [权限] 麦克风权限获取成功');
            } catch (e) {}
            
            console.log('🎤 [调试] 准备检查音频轨道...');
            // 检查实际使用的设备
            const tracks = stream.getAudioTracks();
            console.log('🎤 [调试] 音频轨道数量:', tracks.length);
            
            if (tracks.length > 0) {
                console.log('🎤 [调试] 开始获取设备设置...');
                const track = tracks[0];
                const settings = track.getSettings();
                console.log('🎤 [实际设备] 正在使用的麦克风:');
                console.log(`   设备ID: ${settings.deviceId}`);
                console.log(`   标签: ${track.label}`);
                console.log(`   采样率: ${settings.sampleRate}Hz`);
                console.log(`   声道数: ${settings.channelCount}`);
                console.log(`   音量控制: ${settings.autoGainControl ? '开启' : '关闭'}`);
                console.log(`   噪声抑制: ${settings.noiseSuppression ? '开启' : '关闭'}`);
                console.log(`   回声消除: ${settings.echoCancellation ? '开启' : '关闭'}`);
                
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', `🎤 [实际设备] 设备ID: ${settings.deviceId}`);
                    electron.ipcRenderer.send('log-to-main', `🎤 [实际设备] 标签: ${track.label}`);
                    electron.ipcRenderer.send('log-to-main', `🎤 [实际设备] 采样率: ${settings.sampleRate}Hz`);
                } catch (e) {}
            } else {
                console.log('🎤 [错误] 没有找到音频轨道！');
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', '🎤 [错误] 没有找到音频轨道！');
                } catch (e) {}
            }

            // 创建音量监测
            this.setupAudioLevelMonitoring(stream);
            
            console.log('🎤 [调试] 准备设置音频录制器...');
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [调试] 准备设置音频录制器...');
            } catch (e) {}

            // Create shared AudioContext
            this.audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
            console.log('Shared AudioContext created');

            // 尝试使用最兼容的音频格式
            let options;
            if (MediaRecorder.isTypeSupported('audio/wav')) {
                options = { mimeType: 'audio/wav' };
                console.log('🎤 [格式] 使用WAV格式');
            } else if (MediaRecorder.isTypeSupported('audio/webm;codecs=opus')) {
                options = { 
                    mimeType: 'audio/webm;codecs=opus',
                    audioBitsPerSecond: 16000 
                };
                console.log('🎤 [格式] 使用WebM Opus格式（更兼容）');
            } else if (MediaRecorder.isTypeSupported('audio/webm')) {
                options = { mimeType: 'audio/webm' };
                console.log('🎤 [格式] 使用WebM默认格式');
            } else {
                options = {};
                console.log('🎤 [格式] 使用系统默认格式');
            }
            
            this.mediaRecorder = new MediaRecorder(stream, options);

            console.log('🎤 [调试] MediaRecorder已创建');
            console.log('🎤 [调试] MIME类型:', this.mediaRecorder.mimeType);
            console.log('🎤 [调试] 初始状态:', this.mediaRecorder.state);
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [调试] MediaRecorder已创建，MIME: ${this.mediaRecorder.mimeType}`);
                electron.ipcRenderer.send('log-to-main', `🎤 [调试] 初始状态: ${this.mediaRecorder.state}`);
            } catch (e) {}

            this.audioChunks = [];

            this.mediaRecorder.ondataavailable = (event) => {
                console.log('🎤 [音频数据] 收到音频数据块，大小:', event.data.size, '字节');
                
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', `🎤 [音频数据] 收到音频数据块，大小: ${event.data.size} 字节`);
                } catch (e) {}
                
                // 🔥 强制调试：检查条件
                console.log('🔥 [条件检查] event.data.size:', event.data.size);
                console.log('🔥 [条件检查] event.data.size > 0:', event.data.size > 0);
                console.log('🔥 [条件检查] typeof event.data.size:', typeof event.data.size);
                
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', `🔥 [条件检查] event.data.size: ${event.data.size}, > 0: ${event.data.size > 0}`);
                } catch (e) {}
                
                if (event.data.size > 0) {
                    console.log('🔥 [条件通过] 进入音频处理分支');
                    
                    // 强制输出到主进程
                    try {
                        const electron = require('electron');
                        electron.ipcRenderer.send('log-to-main', '🔥 [条件通过] 进入音频处理分支');
                    } catch (e) {}
                    
                    this.audioChunks.push(event.data);
                    
                    // 分析音频数据格式
                    console.log('🎤 [格式分析] 数据类型:', event.data.type);
                    console.log('🎤 [格式分析] MIME类型:', this.mediaRecorder?.mimeType);
                    
                    // 强制输出到主进程
                    try {
                        const electron = require('electron');
                        electron.ipcRenderer.send('log-to-main', `🎤 [格式分析] 数据类型: ${event.data.type}`);
                        electron.ipcRenderer.send('log-to-main', `🎤 [格式分析] MIME类型: ${this.mediaRecorder?.mimeType}`);
                    } catch (e) {}
                    
                    console.log('🔥 [即将调用] processAudioChunk...');
                    
                    // 强制输出到主进程
                    try {
                        const electron = require('electron');
                        electron.ipcRenderer.send('log-to-main', '🔥 [即将调用] processAudioChunk...');
                    } catch (e) {}
                    
                    // 处理异步函数调用，添加错误捕获
                    this.processAudioChunk(event.data).then(() => {
                        console.log('🔥 [调用完成] processAudioChunk 成功完成');
                        
                        // 强制输出到主进程
                        try {
                            const electron = require('electron');
                            electron.ipcRenderer.send('log-to-main', '🔥 [调用完成] processAudioChunk 成功完成');
                        } catch (e) {}
                    }).catch((error) => {
                        console.error('🔥 [调用错误] processAudioChunk 执行失败:', error);
                        console.error('🔥 [调用错误] 错误详情:', error.message);
                        console.error('🔥 [调用错误] 错误堆栈:', error.stack);
                        
                        // 强制输出到主进程
                        try {
                            const electron = require('electron');
                            electron.ipcRenderer.send('log-to-main', `🔥 [调用错误] processAudioChunk 失败: ${error.message}`);
                        } catch (e) {}
                    });
                } else {
                    console.log('🔥 [条件失败] 音频数据大小为0或无效');
                    console.log('🎤 [警告] 收到空的音频数据块！');
                    
                    // 强制输出到主进程
                    try {
                        const electron = require('electron');
                        electron.ipcRenderer.send('log-to-main', '🔥 [条件失败] 音频数据大小为0或无效');
                        electron.ipcRenderer.send('log-to-main', '🎤 [警告] 收到空的音频数据块！');
                    } catch (e) {}
                }
            };

            this.mediaRecorder.onstop = () => {
                stream.getTracks().forEach(track => track.stop());
                console.log('音频流已释放');
            };

            // 保存stream引用以便后续清理
            this.currentStream = stream;

            // 在开始录音时发送初始请求，与Java实现保持一致
            console.log('🎤 [音频初始化] 准备发送初始客户端请求...');
            if (!this.hasInitialRequestSent) {
                this.hasInitialRequestSent = true;
                this.sendFullClientRequest();
                console.log('🎤 [音频初始化] 已发送初始客户端请求');
            }
            
            // 每100ms收集一次音频数据，提高实时性
            console.log('🎤 [调试] 准备启动MediaRecorder，间隔100ms...');
            this.mediaRecorder.start(100);  // 减少到100毫秒，提高实时性
            console.log('🎤 [调试] MediaRecorder.start()已调用');
            console.log('🎤 [调试] 当前状态:', this.mediaRecorder.state);
            console.log('🎤 [音频初始化] 音频录制已开始');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', '🎤 [调试] MediaRecorder.start()已调用');
                electron.ipcRenderer.send('log-to-main', `🎤 [调试] 当前状态: ${this.mediaRecorder.state}`);
            } catch (e) {}

        } catch (error: any) {
            console.error('🎤 [错误] 获取麦克风权限失败:', error);
            console.error('🎤 [错误] 错误类型:', error.name);
            console.error('🎤 [错误] 错误消息:', error.message);
            console.error('🎤 [错误] 完整错误对象:', error);
            
            if (error.name === 'NotAllowedError') {
                throw new Error('麦克风权限被拒绝。请在macOS系统偏好设置 > 安全性与隐私 > 隐私 > 麦克风中，允许Electron访问麦克风，然后重启应用。');
            } else if (error.name === 'NotFoundError') {
                throw new Error('未找到麦克风设备。请确保您的设备连接了麦克风。');
            } else if (error.name === 'NotSupportedError') {
                throw new Error('您的浏览器不支持音频录制功能。');
            } else if (error.name === 'OverconstrainedError') {
                throw new Error('指定的麦克风设备不可用，请在设置中选择其他麦克风设备。');
            } else {
                throw new Error('无法获取麦克风权限: ' + error.message + '。如果在macOS上，请检查系统隐私设置。');
            }
        }
        
        console.log('🎤 [调试] initializeAudioRecording方法执行完毕');
    }

    private setupAudioLevelMonitoring(stream: MediaStream): void {
        // Feed the microphone stream into an AnalyserNode and log a rough
        // average volume every 500 ms while recording is active.
        console.log('🔊 [音频监测] 开始设置音频音量监测');

        if (!this.audioContext) {
            console.warn('🔊 [音频监测] AudioContext未创建，跳过音量监测');
            return;
        }

        try {
            const micSource = this.audioContext.createMediaStreamSource(stream);
            const analyserNode = this.audioContext.createAnalyser();
            analyserNode.fftSize = 256;
            micSource.connect(analyserNode);

            const levels = new Uint8Array(analyserNode.frequencyBinCount);

            console.log('🔊 [音频监测] 音量监测器创建成功');

            const pollLevel = (): void => {
                // Stop polling as soon as recording ends.
                if (!this.isRecording) {
                    return;
                }

                analyserNode.getByteFrequencyData(levels);
                let sum = 0;
                for (const bin of levels) {
                    sum += bin;
                }
                const average = sum / levels.length;

                // A low threshold (10) makes quiet speech easier to detect.
                if (average > 10) {
                    console.log(`🔊 [音频监测] 检测到声音，平均音量: ${average.toFixed(1)}`);
                } else {
                    console.log(`🔊 [音频监测] 环境音量: ${average.toFixed(1)} (静音)`);
                }

                setTimeout(pollLevel, 500); // sample twice per second
            };

            pollLevel();
        } catch (error) {
            console.error('🔊 [音频监测] 设置音量监测失败:', error);
        }
    }

    private async processAudioChunk(audioBlob: Blob): Promise<void> {
        try {
            console.log(`🎤 [音频块] 大小: ${audioBlob.size} bytes, 类型: ${audioBlob.type}`);
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [音频块] 开始处理，大小: ${audioBlob.size} bytes, 类型: ${audioBlob.type}`);
            } catch (e) {}
            
            // 检查音频数据是否为空
            if (audioBlob.size === 0) {
                console.warn('🎤 [音频块] 音频数据为空，跳过处理');
                return;
            }
            
            console.log('🎤 [音频块] 开始转换为ArrayBuffer...');
            const arrayBuffer = await audioBlob.arrayBuffer();
            console.log(`🎤 [音频块] ArrayBuffer转换完成，大小: ${arrayBuffer.byteLength} bytes`);
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [音频块] ArrayBuffer大小: ${arrayBuffer.byteLength} bytes`);
            } catch (e) {}
            
            // 检查ArrayBuffer是否为空
            if (arrayBuffer.byteLength === 0) {
                console.warn('🎤 [音频块] ArrayBuffer为空，跳过处理');
                return;
            }
            
            console.log('🎤 [音频块] 即将调用convertToTitPCM...');
            const audioData = await this.convertToTitPCM(arrayBuffer);
            console.log('🎤 [音频块] convertToTitPCM调用完成');
            
            if (audioData && audioData.length > 0) {
                console.log(`🎤 [音频发送] PCM数据大小: ${audioData.length} bytes, 即将发送序列号: ${this.sequence + 1}`);
                
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', `🎤 [音频发送] PCM数据大小: ${audioData.length} bytes`);
                } catch (e) {}
                
                this.sendAudioSegment(audioData, false);
                console.log('🎤 [音频发送] 音频段发送完成');
            } else {
                console.warn('🎤 [音频块] 转换失败或数据为空');
                
                // 强制输出到主进程
                try {
                    const electron = require('electron');
                    electron.ipcRenderer.send('log-to-main', '🎤 [音频块] 转换失败或数据为空');
                } catch (e) {}
            }
        } catch (error) {
            console.error('🎤 [音频块] 处理音频数据错误:', error);
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [音频块] 处理错误: ${error}`);
            } catch (e) {}
        }
    }

    private async convertToTitPCM(audioData: ArrayBuffer): Promise<Uint8Array | null> {
        try {
            console.log('🎤 [转换] 开始转换音频数据，原始大小:', audioData.byteLength, '字节');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [转换] 开始转换音频数据，原始大小: ${audioData.byteLength} 字节`);
            } catch (e) {}
            
            // 🔥 最终解决方案：直接发送原始WebM/Opus数据，不进行任何转换
            console.log('🎤 [转换] 采用最终解决方案：直接发送原始WebM/Opus数据');
            const rawData = new Uint8Array(audioData);
            console.log('🎤 [转换] 直接返回原始音频数据，大小:', rawData.length, '字节');
            
            // 强制输出到主进程
            try {
                const electron = require('electron');
                electron.ipcRenderer.send('log-to-main', `🎤 [转换] 最终方案：直接发送原始WebM/Opus数据，大小: ${rawData.length} 字节`);
            } catch (e) {}
            
            return rawData;
            
        } catch (error) {
            console.error('音频转换错误:', error);
            return null;
        }
    }

    /**
     * Converts normalized Float32 samples (clamped to [-1, 1]) into
     * little-endian signed 16-bit PCM bytes. Negative samples scale by 0x8000
     * and non-negative ones by 0x7FFF so both ends of the range map onto the
     * full int16 domain.
     */
    private floatTo16BitPCM(float32Array: Float32Array): Uint8Array {
        const view = new DataView(new ArrayBuffer(float32Array.length * 2));
        float32Array.forEach((value, index) => {
            const clamped = Math.max(-1, Math.min(1, value));
            const scaled = clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF;
            view.setInt16(index * 2, scaled, true);
        });
        return new Uint8Array(view.buffer);
    }

    private sendFullClientRequest(): void {
        console.log('🎤 [协议] 发送完整客户端请求，序列号:', this.sequence);
        
        const payload = {
            user: {
                uid: "test"  // 与Java客户端保持一致
            },
            audio: {
                format: "opus",           // 修正：告诉服务器发送Opus格式
                sample_rate: this.audioConfig.sampleRate,
                bits: this.audioConfig.bits,
                channel: this.audioConfig.channel,
                codec: "opus"             // 保持opus编解码器
            },
            request: {
                model_name: "bigmodel",
                enable_punc: true
                // 移除额外的字段，与Java客户端保持一致
            }
        };

        const payloadStr = JSON.stringify(payload);
        const payloadBytes = this.gzipCompress(new TextEncoder().encode(payloadStr));
        
        const header = this.getHeader(
            this.FULL_CLIENT_REQUEST,
            this.POS_SEQUENCE,
            this.JSON_SERIALIZATION,
            0, // 暂时禁用GZIP压缩
            0
        );
        
        // 使用当前序列号，初始请求不递增序列号
        const seqBytes = this.intToBytes(this.sequence);
        console.log('🎤 [协议] 发送客户端请求使用序列号:', this.sequence);
        // 注意：初始请求不递增序列号，与Java实现保持一致
        
        const payloadSize = this.intToBytes(payloadBytes.length);
        
        const fullRequest = new Uint8Array(
            header.length + seqBytes.length + payloadSize.length + payloadBytes.length
        );
        
        let offset = 0;
        fullRequest.set(header, offset);
        offset += header.length;
        fullRequest.set(seqBytes, offset);
        offset += seqBytes.length;
        fullRequest.set(payloadSize, offset);
        offset += payloadSize.length;
        fullRequest.set(payloadBytes, offset);
        
        if (this.wsConnectionId) {
            const electron = require('electron');
            electron.ipcRenderer.send('doubao-ws-send', { 
                connectId: this.wsConnectionId, 
                data: fullRequest.buffer 
            });
            console.log('🎤 [协议] 已发送完整客户端请求');
        }
    }

    private sendAudioSegment(audioData: Uint8Array, isLast: boolean): void {
        // 在发送前先递增序列号，与Java实现保持一致
        this.sequence++;
        
        const messageTypeSpecificFlags = isLast ? this.NEG_WITH_SEQUENCE : this.POS_SEQUENCE;
        const currentSeq = isLast ? -this.sequence : this.sequence;
        
        const header = this.getHeader(
            this.AUDIO_ONLY_REQUEST,
            messageTypeSpecificFlags,
            this.JSON_SERIALIZATION, // 与Java客户端保持一致
            0, // 暂时禁用GZIP压缩
            0
        );
        
        const seqBytes = this.intToBytes(currentSeq);
        const compressedAudio = this.gzipCompress(audioData);
        const payloadSize = this.intToBytes(compressedAudio.length);
        
        const audioRequest = new Uint8Array(
            header.length + seqBytes.length + payloadSize.length + compressedAudio.length
        );
        
        let offset = 0;
        audioRequest.set(header, offset);
        offset += header.length;
        audioRequest.set(seqBytes, offset);
        offset += seqBytes.length;
        audioRequest.set(payloadSize, offset);
        offset += payloadSize.length;
        audioRequest.set(compressedAudio, offset);
        
        if (this.wsConnectionId) {
            const electron = require('electron');
            electron.ipcRenderer.send('doubao-ws-send', { 
                connectId: this.wsConnectionId, 
                data: audioRequest.buffer 
            });
            console.log(`已发送音频分段: 序号${currentSeq}, 长度${audioData.length}, 最后一段:${isLast}`);
        }
    }

    /** Signals end-of-stream by sending an empty audio segment flagged as last. */
    private sendFinalAudioPacket(): void {
        console.log('[音频结束] 发送最终音频包 (空数据, isLast=true)');
        const emptyPayload = new Uint8Array(0);
        this.sendAudioSegment(emptyPayload, true);
    }

    /**
     * Handles a binary WebSocket frame from the server: parses it, forwards
     * any recognition result to the onResult callback, and tears the session
     * down once the server's final packet has arrived.
     */
    private handleBinaryMessage(data: Uint8Array): void {
        try {
            console.log(`[消息接收] 收到二进制消息，长度: ${data.length} bytes`);
            console.log(`[消息接收] 消息数据前16字节:`, Array.from(data.slice(0, 16)).map(b => b.toString(16).padStart(2, '0')).join(' '));
            
            const result = this.parseResponse(data);
            if (result) {
                console.log(`[消息处理] 解析成功，结果:`, result);
                if (this.onResult) {
                    this.onResult(result);
                }
                
                // Bug fix: parseResponse returns `sequence: Math.abs(...)`, so
                // the old check `result.sequence < 0` was never true and the
                // connection was never closed on the last packet. Use the
                // `final` flag, which parseResponse sets when the raw sequence
                // number from the wire was negative.
                if (result.final) {
                    this.disconnect();
                }
            } else {
                console.log(`[消息处理] parseResponse返回null，可能是非识别结果消息`);
            }
        } catch (error) {
            console.error('解析响应错误:', error);
            this.notifyError('解析语音识别结果失败');
        }
    }

    /**
     * Parses one binary server frame into a RecognitionResult.
     *
     * Frame layout (big-endian): 4-byte header, 4-byte signed sequence
     * number, 4-byte payload size, then the payload. A negative raw sequence
     * marks the server's final packet; the returned result reports
     * `final: true` together with the absolute sequence value.
     *
     * Returns null for frames that are too short, truncated, of an unknown
     * message type, or that carry no recognition text. Server-reported errors
     * are forwarded to notifyError and also yield null.
     */
    private parseResponse(data: Uint8Array): RecognitionResult | null {
        console.log(`[响应解析] 开始解析响应，数据长度: ${data.length}`);

        if (data.length < 12) {
            console.log(`[响应解析] 数据长度不足12字节，跳过`);
            return null;
        }

        // Parse the header nibbles: message type and compression method.
        // NOTE(review): the header-size nibble (data[0] & 0x0f) is ignored;
        // this assumes a fixed 4-byte header — confirm against the protocol spec.
        const messageType = (data[1] >> 4) & 0x0f;
        const messageCompression = data[2] & 0x0f;

        console.log(`[响应解析] 消息类型: ${messageType}, 压缩类型: ${messageCompression}`);
        console.log(`[响应解析] 期望的FULL_SERVER_RESPONSE: ${this.FULL_SERVER_RESPONSE}`);

        // Parse the signed 32-bit sequence number.
        const sequence = this.bytesToInt(data.slice(4, 8));
        console.log(`[响应解析] 序列号: ${sequence}`);

        // Parse the payload size and bounds-check it against the frame length.
        const payloadSize = this.bytesToInt(data.slice(8, 12));
        console.log(`[响应解析] Payload大小: ${payloadSize}`);

        if (payloadSize > data.length - 12) {
            console.log(`[响应解析] Payload大小超出数据范围，数据可能不完整`);
            return null;
        }

        const payload = data.slice(12, 12 + payloadSize);
        console.log(`[响应解析] Payload前50字节:`, Array.from(payload.slice(0, 50)).map(b => String.fromCharCode(b)).join(''));

        let payloadStr = '';
        if (messageType === this.FULL_SERVER_RESPONSE) {
            console.log(`[响应解析] 这是FULL_SERVER_RESPONSE消息`);

            if (messageCompression === this.GZIP_COMPRESSION) {
                console.log(`[响应解析] 使用GZIP解压`);
                // NOTE: gzipDecompress is currently an identity stub.
                payloadStr = new TextDecoder().decode(this.gzipDecompress(payload));
            } else {
                console.log(`[响应解析] 不使用压缩，直接解码`);
                payloadStr = new TextDecoder().decode(payload);
            }

            console.log(`[响应解析] 解码后的payload:`, payloadStr);

            try {
                const result = JSON.parse(payloadStr);
                console.log('识别结果:', result);

                // Parse per the JSON shape Doubao actually returns, e.g.:
                // {"audio_info":{"duration":180},"result":{"additions":{"log_id":"..."},"text":"..."}}
                if (result.result && typeof result.result.text === 'string') {
                    const text = result.result.text.trim();
                    console.log(`[响应解析] 找到识别文本: "${text}"`);

                    // Return even when the text is empty — that is normal for
                    // silent segments.
                    return {
                        text: text,
                        final: sequence < 0,
                        sequence: Math.abs(sequence)
                    };
                } else {
                    console.log(`[响应解析] JSON中没有找到result.text字段`);
                    console.log(`[响应解析] result结构:`, result.result ? Object.keys(result.result) : 'result字段不存在');

                    // The payload may instead carry an error message.
                    if (result.error) {
                        console.error(`[响应解析] 服务器错误:`, result.error);
                        this.notifyError('豆包语音识别错误: ' + result.error);
                        return null;
                    }
                }
            } catch (error) {
                console.error('解析JSON错误:', error);
                console.log(`[响应解析] 无法解析的JSON字符串:`, payloadStr);
            }
        } else if (messageType === this.SERVER_ERROR_RESPONSE) {
            console.log(`[响应解析] 这是SERVER_ERROR_RESPONSE消息`);
            const errorMsg = new TextDecoder().decode(payload);
            console.error('服务器错误:', errorMsg);
            this.notifyError('语音识别服务错误: ' + errorMsg);
        } else {
            console.log(`[响应解析] 未知消息类型: ${messageType}`);
        }

        return null;
    }

    // Utility methods
    /**
     * Assembles the 4-byte protocol header:
     *   byte 0 — protocol version (high nibble) | header size (low nibble)
     *   byte 1 — message type (high nibble) | type-specific flags (low nibble)
     *   byte 2 — serialization method (high nibble) | compression (low nibble)
     *   byte 3 — reserved
     */
    private getHeader(messageType: number, messageTypeSpecificFlags: number, 
                     serialMethod: number, compressionType: number, reservedData: number): Uint8Array {
        return Uint8Array.of(
            (this.PROTOCOL_VERSION << 4) | this.DEFAULT_HEADER_SIZE,
            (messageType << 4) | messageTypeSpecificFlags,
            (serialMethod << 4) | compressionType,
            reservedData
        );
    }

    /** Encodes a number as 4 big-endian bytes (ToInt32 wrap semantics). */
    private intToBytes(value: number): Uint8Array {
        const result = new Uint8Array(4);
        new DataView(result.buffer).setInt32(0, value, false);
        return result;
    }

    /** Decodes 4 big-endian bytes as a signed 32-bit integer. */
    private bytesToInt(bytes: Uint8Array): number {
        let value = 0;
        for (let i = 0; i < 4; i++) {
            value = (value << 8) | (bytes[i] & 0xFF);
        }
        return value;
    }

    // GZIP support is intentionally stubbed out in both directions. The frame
    // headers sent by this class declare compression = 0 to match, so these
    // identity pass-throughs keep the wire format self-consistent. Enabling
    // real GZIP would require flipping the compression nibble in the senders
    // as well.
    private gzipCompress(data: Uint8Array): Uint8Array {
        // Compression temporarily disabled — return the data unchanged.
        return data;
    }

    private gzipDecompress(data: Uint8Array): Uint8Array {
        // Decompression temporarily disabled — return the data unchanged.
        // TODO: implement real GZIP inflate later.
        return data;
    }

    /**
     * Generates an RFC 4122 version-4 style UUID string from Math.random.
     * (Not cryptographically strong — used only as an identifier.)
     */
    private generateUUID(): string {
        const template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx';
        return template.replace(/[xy]/g, (c) => {
            const random = Math.random() * 16 | 0;
            const nibble = c === 'x' ? random : (random & 0x3) | 0x8;
            return nibble.toString(16);
        });
    }

    private updateStatus(status: 'connecting' | 'connected' | 'recording' | 'processing' | 'disconnected' | 'error'): void {
        if (this.onStatusChange) {
            this.onStatusChange(status);
        }
    }

    private notifyError(error: string): void {
        if (this.onError) {
            this.onError(error);
        }
    }

    /**
     * Tears down the whole recognition session: stops recording, resets the
     * per-session protocol state, releases microphone tracks, asks the main
     * process to close the WebSocket, closes the shared AudioContext, and
     * finally reports the 'disconnected' status.
     */
    public disconnect(): void {
        this.isRecording = false;
        // Reset per-session protocol state so the next connection starts clean.
        this.hasInitialRequestSent = false;
        this.sequence = 1;
        console.log('[连接断开] 序列号和初始请求标志已重置');

        if (this.mediaRecorder?.state === 'recording') {
            this.mediaRecorder.stop();
        }

        // Release the microphone.
        if (this.currentStream) {
            for (const track of this.currentStream.getTracks()) {
                track.stop();
            }
            this.currentStream = null;
            console.log('音频流已清理');
        }

        // Ask the main process to close the WebSocket connection.
        if (this.wsConnectionId) {
            const electron = require('electron');
            electron.ipcRenderer.send('doubao-ws-close', this.wsConnectionId);
            this.wsConnectionId = null;
        }

        // Close the shared AudioContext (asynchronous; failures only logged).
        if (this.audioContext) {
            this.audioContext
                .close()
                .then(() => console.log('AudioContext closed'))
                .catch((err: unknown) => console.error('Error closing AudioContext:', err));
            this.audioContext = null;
        }

        this.updateStatus('disconnected');
    }

    /** True while a WebSocket connection id is held. */
    public isConnected(): boolean {
        return this.wsConnectionId !== null;
    }

    /** Exposes the current recording flag. */
    public getRecordingState(): boolean {
        return this.isRecording;
    }

    /** The configuration is usable once both API credentials are present. */
    public isConfigValid(): boolean {
        return Boolean(this.config.appKey && this.config.accessKey);
    }
}

/**
 * Global singleton that wires VoiceRecognition into page input elements:
 * inserts recognized text into the active input, reports errors through the
 * main process, and keeps every voice button's UI in sync with the
 * recognition status.
 */
class VoiceRecognitionManager {
    private static instance: VoiceRecognitionManager;
    private voiceRecognition: VoiceRecognition;
    // Input that receives recognized text; set by startRecording().
    private currentTargetElement: HTMLInputElement | HTMLTextAreaElement | null = null;

    private constructor() {
        this.voiceRecognition = new VoiceRecognition();
        this.setupCallbacks();
    }

    /** Returns the process-wide singleton, creating it on first use. */
    public static getInstance(): VoiceRecognitionManager {
        if (!VoiceRecognitionManager.instance) {
            VoiceRecognitionManager.instance = new VoiceRecognitionManager();
        }
        return VoiceRecognitionManager.instance;
    }

    // Connects the recognizer's result/error/status callbacks to this manager.
    private setupCallbacks(): void {
        this.voiceRecognition.setCallbacks({
            onResult: (result) => {
                if (this.currentTargetElement && result.text) {
                    // Insert recognition results as they stream in.
                    // NOTE(review): if the server delivers cumulative interim
                    // transcripts rather than increments, this would insert
                    // duplicated text — confirm against server behavior.
                    this.insertTextToTarget(result.text);
                }
            },
            onError: (error) => {
                console.error('语音识别错误:', error);
                this.showErrorMessage(error);
            },
            onStatusChange: (status) => {
                console.log('语音识别状态:', status);
                this.updateRecordingUI(status);
            }
        });
    }

    /**
     * Starts voice recognition, directing recognized text at targetElement.
     *
     * @throws Error when the Doubao API credentials are not configured.
     */
    public async startRecording(targetElement: HTMLInputElement | HTMLTextAreaElement): Promise<void> {
        console.log('[VRM] startRecording被调用，目标元素:', targetElement.tagName);
        this.currentTargetElement = targetElement;
        
        // Make sure the configuration has been loaded.
        console.log('[VRM] 开始加载配置...');
        await this.voiceRecognition.loadConfig();
        
        // Verify the configuration is complete before recording.
        console.log('[VRM] 检查配置有效性...');
        if (!this.voiceRecognition.isConfigValid()) {
            console.log('[VRM] 配置无效，抛出错误');
            throw new Error('请先在设置中配置豆包API密钥');
        }
        
        console.log('[VRM] 配置有效，开始录音...');
        await this.voiceRecognition.startRecording();
        console.log('[VRM] startRecording完成');
    }

    /** Stops an in-progress recording session. */
    public stopRecording(): void {
        this.voiceRecognition.stopRecording();
    }

    // Splices text over the current selection of the target input, moves the
    // caret past the inserted text, and fires an 'input' event so framework
    // listeners stay in sync with the programmatic edit.
    private insertTextToTarget(text: string): void {
        if (!this.currentTargetElement) return;

        const element = this.currentTargetElement;
        const startPos = element.selectionStart || 0;
        const endPos = element.selectionEnd || 0;
        const currentValue = element.value;
        
        // Replace the selected range with the recognized text.
        const newValue = currentValue.substring(0, startPos) + text + currentValue.substring(endPos);
        element.value = newValue;
        
        // Place the caret after the inserted text.
        const newCursorPos = startPos + text.length;
        element.setSelectionRange(newCursorPos, newCursorPos);
        
        // Notify listeners of the change.
        element.dispatchEvent(new Event('input', { bubbles: true }));
    }

    // Shows the error via the main process message dialog.
    private showErrorMessage(error: string): void {
        const electron = require('electron');
        electron.ipcRenderer.send('show-message', {
            type: 'error',
            message: '语音识别错误: ' + error
        });
    }

    // Updates every voice button on the page to reflect the current status.
    // Statuses other than the three cases below leave the buttons unchanged.
    private updateRecordingUI(status: string): void {
        const buttons = document.querySelectorAll('.voice-recognition-btn');
        const isAIAssistant = window.location.pathname.includes('aiAssistant.html');
        
        buttons.forEach((button: Element) => {
            const btn = button as HTMLButtonElement;
            switch (status) {
                case 'recording':
                    btn.textContent = '🔴';
                    btn.title = '正在录音... (点击停止)';
                    btn.classList.add('recording');
                    if (isAIAssistant) {
                        btn.style.background = '#dc3545';
                        btn.style.boxShadow = '0 2px 4px rgba(220,53,69,0.4)';
                    }
                    break;
                case 'processing':
                    btn.textContent = '⏳';
                    btn.title = '正在处理...';
                    btn.classList.remove('recording');
                    if (isAIAssistant) {
                        btn.style.background = '#ffc107';
                        btn.style.boxShadow = '0 2px 4px rgba(255,193,7,0.4)';
                    }
                    break;
                case 'disconnected':
                case 'error':
                    btn.textContent = '🎤';
                    btn.title = '语音输入';
                    btn.classList.remove('recording');
                    if (isAIAssistant) {
                        btn.style.background = '#007bff';
                        btn.style.boxShadow = '0 2px 4px rgba(0,123,255,0.3)';
                    }
                    break;
            }
        });
    }

    /** True while the underlying recognizer is recording. */
    public isRecording(): boolean {
        return this.voiceRecognition.getRecordingState();
    }

    /** Forwards configuration to the underlying recognizer. */
    public setConfig(config: any): void {
        this.voiceRecognition.setConfig(config);
    }
}

/**
 * Attaches a microphone button to the given input element. Clicking the
 * button toggles voice recognition for that input; recognized text is routed
 * into the input by VoiceRecognitionManager.
 *
 * The button is absolutely positioned inside the input's parent (the parent
 * is forced to position:relative when static). The AI-assistant page gets a
 * round blue button with hover effects; other pages get a minimal
 * transparent one. Adding is idempotent per parent element.
 *
 * Refactor: the nine duplicated
 * `try { require('electron'); ipcRenderer.send('log-to-main', ...) } catch {}`
 * blocks are collapsed into one local `logToMain` helper, and the start-up
 * path is extracted into `startRecordingFlow`; all log/IPC messages are
 * preserved verbatim.
 */
function addVoiceRecognitionToInput(inputElement: HTMLInputElement | HTMLTextAreaElement): void {
    console.log('[按钮添加] 开始为输入元素添加语音按钮:', inputElement.tagName, inputElement.id || '(无ID)');
    
    // Skip if this input's parent already carries a voice button.
    if (inputElement.parentElement?.querySelector('.voice-recognition-btn')) {
        console.log('[按钮添加] 按钮已存在，跳过添加');
        return;
    }

    // Best-effort mirror of renderer logs into the main-process console;
    // silently skipped when no Electron IPC is available.
    const logToMain = (message: string): void => {
        try {
            const electron = require('electron');
            electron.ipcRenderer.send('log-to-main', message);
        } catch (e) {
            // No Electron environment — ignore.
        }
    };

    const button = document.createElement('button');
    button.type = 'button';
    button.className = 'voice-recognition-btn';
    button.textContent = '🎤';
    button.title = '语音输入';

    // The AI assistant page gets dedicated styling.
    const isAIAssistant = window.location.pathname.includes('aiAssistant.html');
    
    if (isAIAssistant) {
        // Round blue button for the AI assistant page.
        button.style.cssText = `
            position: absolute;
            right: 45px;
            top: 50%;
            transform: translateY(-50%);
            border: none;
            background: #007bff;
            color: white;
            font-size: 14px;
            cursor: pointer;
            padding: 6px 8px;
            border-radius: 50%;
            z-index: 1000;
            width: 32px;
            height: 32px;
            display: flex;
            align-items: center;
            justify-content: center;
            box-shadow: 0 2px 4px rgba(0,123,255,0.3);
            transition: all 0.2s ease;
        `;
    } else {
        // Minimal transparent button for all other pages.
        button.style.cssText = `
            position: absolute;
            right: 5px;
            top: 50%;
            transform: translateY(-50%);
            border: none;
            background: transparent;
            font-size: 16px;
            cursor: pointer;
            padding: 2px 4px;
            border-radius: 3px;
            z-index: 1000;
            transition: all 0.2s ease;
        `;
    }

    // Make sure the parent establishes a positioning context for the button.
    const parent = inputElement.parentElement;
    console.log('[按钮添加] 父元素:', parent ? parent.tagName : '无父元素');
    
    if (parent) {
        const parentStyle = window.getComputedStyle(parent);
        if (parentStyle.position === 'static') {
            parent.style.position = 'relative';
            console.log('[按钮添加] 设置父元素为相对定位');
        }
        parent.appendChild(button);
        console.log('[按钮添加] 按钮已添加到父元素');
    } else {
        console.error('[按钮添加] 无法添加按钮：输入元素没有父元素');
    }

    // Hover effects (AI assistant page only).
    if (isAIAssistant) {
        button.addEventListener('mouseenter', () => {
            if (!button.classList.contains('recording')) {
                button.style.background = '#0056b3';
                button.style.transform = 'translateY(-50%) scale(1.05)';
            }
        });
        
        button.addEventListener('mouseleave', () => {
            if (!button.classList.contains('recording')) {
                button.style.background = '#007bff';
                button.style.transform = 'translateY(-50%) scale(1)';
            }
        });
    }

    // Starts recording into the target input, reporting failures to the user
    // via the main-process message dialog.
    const startRecordingFlow = async (manager: VoiceRecognitionManager): Promise<void> => {
        try {
            console.log('🔥 [按钮点击] 调用startRecording...');
            logToMain('🔥 [按钮点击] 即将调用startRecording');
            
            await manager.startRecording(inputElement);
            console.log('🔥 [按钮点击] 录音启动成功');
            logToMain('🔥 [按钮点击] startRecording调用成功');
        } catch (error: any) {
            console.error('🔥 [按钮点击] 启动语音识别失败:', error);
            logToMain('🔥 [按钮点击] 启动失败: ' + error.message);
            logToMain('🔥 [按钮点击] 错误堆栈: ' + error.stack);
            
            // Intentionally unguarded (as in the original): if IPC itself is
            // unavailable, the click handler's outer catch logs the failure.
            const electron = require('electron');
            electron.ipcRenderer.send('show-message', {
                type: 'error',
                message: '启动语音识别失败: ' + error.message
            });
        }
    };

    button.addEventListener('click', async () => {
        console.log('🔥🔥🔥 [按钮点击] 麦克风按钮被点击！！！');
        logToMain('🔥🔥🔥 [按钮点击] 麦克风按钮被点击！！！');
        
        try {
            console.log('🔥 [按钮点击] 尝试获取VoiceRecognitionManager...');
            const manager = VoiceRecognitionManager.getInstance();
            console.log('🔥 [按钮点击] 获取VoiceRecognitionManager实例成功');
            logToMain('🔥 [按钮点击] VoiceRecognitionManager获取成功');
            
            console.log('🔥 [按钮点击] 检查当前录音状态...');
            const isCurrentlyRecording = manager.isRecording();
            console.log('🔥 [按钮点击] 当前录音状态:', isCurrentlyRecording);
            logToMain(`🔥 [按钮点击] 录音状态检查: ${isCurrentlyRecording}`);
            
            if (isCurrentlyRecording) {
                console.log('🔥 [按钮点击] 当前正在录音，停止录音');
                logToMain('🔥 [按钮点击] 执行停止录音');
                manager.stopRecording();
            } else {
                console.log('🔥 [按钮点击] 当前未录音，准备开始录音');
                logToMain('🔥 [按钮点击] 准备开始录音');
                await startRecordingFlow(manager);
            }
        } catch (outerError: any) {
            console.error('🔥 [按钮点击] 按钮处理器发生异常:', outerError);
            logToMain('🔥 [按钮点击] 外层异常: ' + outerError.message);
        }
    });
}

/**
 * Adds a voice-recognition button to every text input and textarea currently
 * on the page, and installs a MutationObserver so inputs added later get one
 * as well.
 */
function initVoiceRecognitionForPage(): void {
    console.log('[VoiceInit] Starting voice recognition initialization');
    console.log('[VoiceInit] Current page:', window.location.pathname);

    const selector = 'input[type="text"], textarea';

    // Wire up everything already present on the page.
    const inputs = document.querySelectorAll(selector);
    console.log('[VoiceInit] Found', inputs.length, 'input elements');
    inputs.forEach((input, index) => {
        console.log(`[VoiceInit] Adding voice button to element ${index + 1}:`, input.tagName, input.id || '(no ID)');
        addVoiceRecognitionToInput(input as HTMLInputElement | HTMLTextAreaElement);
    });

    // Watch for inputs inserted after initialization.
    const observer = new MutationObserver((mutations) => {
        for (const mutation of mutations) {
            for (const node of Array.from(mutation.addedNodes)) {
                if (node.nodeType !== Node.ELEMENT_NODE) {
                    continue;
                }
                const element = node as Element;

                // The added node itself may be an input...
                if (element.matches(selector)) {
                    addVoiceRecognitionToInput(element as HTMLInputElement | HTMLTextAreaElement);
                }

                // ...and it may contain inputs in its subtree.
                element.querySelectorAll(selector).forEach((input) => {
                    addVoiceRecognitionToInput(input as HTMLInputElement | HTMLTextAreaElement);
                });
            }
        }
    });

    observer.observe(document.body, {
        childList: true,
        subtree: true
    });
}

// 将函数和类导出到全局作用域
(window as any).VoiceRecognition = VoiceRecognition;
(window as any).VoiceRecognitionManager = VoiceRecognitionManager;
(window as any).addVoiceRecognitionToInput = addVoiceRecognitionToInput;
(window as any).initVoiceRecognitionForPage = initVoiceRecognitionForPage; 