const mysql = require('../../config/db.config');
const fs = require('fs');
const sdk = require('microsoft-cognitiveservices-speech-sdk');
const axios = require('axios');

class AzureSpeechService {
    /**
     * Wrapper around the Microsoft Cognitive Services Speech SDK providing:
     *  - speech-to-text with automatic zh-CN / en-US language detection,
     *  - pronunciation assessment persisted to MySQL,
     *  - text-to-speech (file output and in-memory stream).
     *
     * Configuration is read from the environment:
     *  - AZURE_SPEECH_KEY    (required)
     *  - AZURE_SPEECH_REGION (optional, defaults to 'eastasia')
     */
    constructor() {
        this.subscriptionKey = process.env.AZURE_SPEECH_KEY;
        this.region = process.env.AZURE_SPEECH_REGION || 'eastasia';
        // Surface a missing key at construction time instead of as opaque
        // auth failures on the first API call.
        if (!this.subscriptionKey) {
            console.warn('AzureSpeechService: AZURE_SPEECH_KEY is not set; Azure Speech calls will fail.');
        }
    }

    /**
     * Transcribe an uploaded WAV file, auto-detecting zh-CN vs en-US.
     *
     * @param {{path: string}} audioFile - multer-style upload; only `path` is read here.
     * @returns {Promise<object>} `{success:true, data:{text, language}}` on success,
     *   `{success:false, message}` when the input file is invalid; rejects when
     *   the recognition session itself fails.
     */
    async speechToText(audioFile) {
        try {
            // 1. Read and validate the audio (must be 16 kHz / 16-bit / mono PCM WAV).
            const audioData = fs.readFileSync(audioFile.path);
            this._validateWavFile(audioData);

            // 2. Speech configuration. Language detection is configured via
            //    AutoDetectSourceLanguageConfig below, which is the supported
            //    mechanism in the JS SDK.
            const speechConfig = sdk.SpeechConfig.fromSubscription(this.subscriptionKey, this.region);

            // 3. Feed the file bytes through an in-memory push stream.
            const pushStream = sdk.AudioInputStream.createPushStream();
            pushStream.write(audioData);
            pushStream.close();
            const audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);

            // 4. Recognizer with automatic source-language detection.
            const autoDetectSourceLanguageConfig = sdk.AutoDetectSourceLanguageConfig.fromLanguages(["zh-CN", "en-US"]);
            const recognizer = sdk.SpeechRecognizer.FromConfig(speechConfig, autoDetectSourceLanguageConfig, audioConfig);

            // 5. Run continuous recognition until the stream is exhausted.
            return new Promise((resolve, reject) => {
                let finalText = '';
                let detectedLanguage = '';

                // Stop and dispose the recognizer exactly once, whichever
                // path ends the session (fixes a handle leak: the previous
                // version never called close()).
                let closed = false;
                const shutdown = () => {
                    if (closed) return;
                    closed = true;
                    recognizer.stopContinuousRecognitionAsync(
                        () => recognizer.close(),
                        () => recognizer.close()
                    );
                };

                recognizer.recognized = (s, e) => {
                    if (e.result.reason === sdk.ResultReason.RecognizedSpeech) {
                        finalText += e.result.text + ' ';
                        // Remember the first detected language for the whole clip.
                        if (!detectedLanguage && e.result.language) {
                            detectedLanguage = e.result.language;
                        }
                    }
                };

                recognizer.canceled = (s, e) => {
                    if (e.reason === sdk.CancellationReason.Error) {
                        reject(new Error(`Error during recognition: ${e.errorDetails}`));
                    }
                    // A non-error cancellation (EndOfStream) is the normal
                    // end of a file; sessionStopped will resolve.
                    shutdown();
                };

                recognizer.sessionStopped = () => {
                    shutdown();
                    resolve({
                        success: true,
                        data: {
                            text: finalText.trim(),
                            language: detectedLanguage || 'unknown'
                        }
                    });
                };

                recognizer.startContinuousRecognitionAsync(
                    () => {},
                    (error) => {
                        console.error('Error starting recognition:', error);
                        shutdown();
                        reject(error);
                    }
                );
            });
        } catch (error) {
            console.error('Recognition Error:', error.message);
            return {
                success: false,
                message: error.message
            };
        }
    }

    /**
     * Run Azure pronunciation assessment on an uploaded WAV file and persist
     * both the summary scores and the per-word details to MySQL.
     *
     * @param {number} userId - owner of the assessment row.
     * @param {{path: string, filename: string}} audioFile - multer-style upload.
     * @param {?string} referenceText - script to read aloud; null selects the
     *   unscripted ("free") assessment mode.
     * @returns {Promise<object>} `{success:true, data:{assessmentId, scores, ...}}`
     *   on success, `{success:false, message, ...}` for input/setup failures;
     *   rejects when the recognition session fails after it has started.
     */
    async assessAndStore(userId, audioFile, referenceText = null) {
        let assessmentId;
        try {
            // 1. Read and validate the audio file.
            const audioData = fs.readFileSync(audioFile.path);
            this._validateWavFile(audioData);

            // 2. Optionally synthesize reference audio for the script.
            //    Best-effort: the assessment proceeds even if TTS fails.
            let ttsAudioUrl = null;
            if (referenceText) {
                try {
                    const ttsResult = await this.textToSpeech(referenceText);
                    if (ttsResult.success) {
                        ttsAudioUrl = ttsResult.audioUrl;
                    }
                } catch (ttsError) {
                    console.error('TTS generation error:', ttsError);
                }
            }

            // 3. Create the pending assessment record.
            const [insertResult] = await mysql.query(
                'INSERT INTO pronunciation_assessments (user_id, assessment_type, reference_text, status, audio_url, tts_audio_url) VALUES (?, ?, ?, ?, ?, ?)',
                [userId, referenceText ? 'scripted' : 'free', referenceText, 'pending', `/uploads/audio/${audioFile.filename}`, ttsAudioUrl]
            );
            assessmentId = insertResult.insertId;

            // 4. Speech configuration (assessment is English-only for now).
            const speechConfig = sdk.SpeechConfig.fromSubscription(this.subscriptionKey, this.region);
            speechConfig.speechRecognitionLanguage = "en-US";

            // 5. Audio configuration from an in-memory push stream.
            const pushStream = sdk.AudioInputStream.createPushStream();
            pushStream.write(audioData);
            pushStream.close();
            const audioConfig = sdk.AudioConfig.fromStreamInput(pushStream);

            // 6. Pronunciation assessment configuration. The SDK expects an
            //    empty string (not null) as the reference text for
            //    unscripted assessment.
            const pronunciationAssessmentConfig = new sdk.PronunciationAssessmentConfig(
                referenceText || '',
                sdk.PronunciationAssessmentGradingSystem.HundredMark,
                sdk.PronunciationAssessmentGranularity.Phoneme,
                true
            );
            pronunciationAssessmentConfig.enableProsodyAssessment = true;
            pronunciationAssessmentConfig.enableDetailedResult = true;

            // 7. Recognizer with the assessment config applied.
            const recognizer = new sdk.SpeechRecognizer(speechConfig, audioConfig);
            pronunciationAssessmentConfig.applyTo(recognizer);

            // 8. Run the assessment. `handled` guards against settling the
            //    promise (and writing the DB) more than once: continuous
            //    recognition can emit several `recognized` events.
            return new Promise((resolve, reject) => {
                let handled = false;

                // Stop and dispose the recognizer exactly once.
                let closed = false;
                const shutdown = () => {
                    if (closed) return;
                    closed = true;
                    recognizer.stopContinuousRecognitionAsync(
                        () => recognizer.close(),
                        () => recognizer.close()
                    );
                };

                // Best-effort failure bookkeeping: never let a DB error
                // mask the original failure (previously an await here could
                // raise an unhandled rejection).
                const markFailed = async (message) => {
                    if (!assessmentId) return;
                    try {
                        await mysql.query(
                            'UPDATE pronunciation_assessments SET status = ?, error_message = ? WHERE assessment_id = ?',
                            ['failed', message, assessmentId]
                        );
                    } catch (dbError) {
                        console.error('Failed to record assessment failure:', dbError);
                    }
                };

                recognizer.recognized = async (s, e) => {
                    if (handled || e.result.reason !== sdk.ResultReason.RecognizedSpeech) {
                        return;
                    }
                    handled = true;
                    try {
                        const pronunciationAssessmentResult = sdk.PronunciationAssessmentResult.fromResult(e.result);

                        // Detailed per-word/phoneme JSON from the service.
                        const responseJson = JSON.parse(e.result.properties.getProperty(sdk.PropertyId.SpeechServiceResponse_JsonResult));
                        const prosodyScore = responseJson.NBest?.[0]?.PronunciationAssessment?.ProsodyScore || 0;

                        // 9. Store the summary scores.
                        await mysql.query(
                            `UPDATE pronunciation_assessments 
                             SET status = ?, pronunciation_score = ?, accuracy_score = ?, 
                                 fluency_score = ?, prosody_score = ?, completeness_score = ?, 
                                 recognized_text = ?, raw_json = ?, updated_at = NOW()
                             WHERE assessment_id = ?`,
                            ['completed',
                             pronunciationAssessmentResult.pronunciationScore,
                             pronunciationAssessmentResult.accuracyScore,
                             pronunciationAssessmentResult.fluencyScore,
                             prosodyScore,
                             pronunciationAssessmentResult.completenessScore,
                             e.result.text,
                             JSON.stringify(responseJson),
                             assessmentId]
                        );

                        // 10. Store per-word detail rows in one bulk insert.
                        if (responseJson.NBest?.[0]?.Words) {
                            const words = responseJson.NBest[0].Words;
                            const detailsValues = words.map(word => [
                                assessmentId,
                                userId,
                                word.Word,
                                word.PronunciationAssessment?.AccuracyScore || null,
                                word.PronunciationAssessment?.ErrorType || null,
                                JSON.stringify(word.Syllables || []),
                                JSON.stringify(word.Phonemes || []),
                                word.Offset || null,
                                word.Duration || null,
                                JSON.stringify({
                                    Prosody: word.PronunciationAssessment?.Feedback?.Prosody || {}
                                })
                            ]);

                            if (detailsValues.length > 0) {
                                await mysql.query(
                                    `INSERT INTO pronunciation_assessment_details 
                                     (assessment_id, user_id, word, word_accuracy_score, word_error_type, 
                                      syllables, phonemes, offset_ms, duration_ms, prosody_feedback)
                                     VALUES ?`,
                                    [detailsValues]
                                );
                            }
                        }

                        shutdown();
                        resolve({
                            success: true,
                            data: {
                                assessmentId,
                                status: 'completed',
                                scores: {
                                    pronunciation: pronunciationAssessmentResult.pronunciationScore,
                                    accuracy: pronunciationAssessmentResult.accuracyScore,
                                    fluency: pronunciationAssessmentResult.fluencyScore,
                                    prosody: prosodyScore,
                                    completeness: pronunciationAssessmentResult.completenessScore
                                },
                                recognizedText: e.result.text,
                                ttsAudioUrl: ttsAudioUrl
                            }
                        });
                    } catch (error) {
                        await markFailed(error.message);
                        shutdown();
                        reject(error);
                    }
                };

                recognizer.canceled = async (s, e) => {
                    if (e.reason === sdk.CancellationReason.Error && !handled) {
                        handled = true;
                        const errorMessage = `Error during assessment: ${e.errorDetails}`;
                        console.error(errorMessage);
                        await markFailed(errorMessage);
                        reject(new Error(errorMessage));
                    }
                    shutdown();
                };

                recognizer.sessionStopped = async () => {
                    // The session can end without any RecognizedSpeech event
                    // (e.g. silence / NoMatch); previously the promise hung
                    // forever and the DB row stayed 'pending' in that case.
                    if (handled) return;
                    handled = true;
                    const message = 'No speech could be recognized from the audio';
                    await markFailed(message);
                    shutdown();
                    reject(new Error(message));
                };

                recognizer.startContinuousRecognitionAsync(
                    () => {},
                    (error) => {
                        console.error('Error starting assessment:', error);
                        shutdown();
                        reject(error);
                    }
                );
            });

        } catch (error) {
            console.error('Assessment Error:', error.message);

            if (assessmentId) {
                try {
                    await mysql.query(
                        'UPDATE pronunciation_assessments SET status = ?, error_message = ? WHERE assessment_id = ?',
                        ['failed', error.message, assessmentId]
                    );
                } catch (dbError) {
                    console.error('Failed to record assessment failure:', dbError);
                }
            }

            return {
                success: false,
                message: error.message,
                details: error.response?.data,
                assessmentId
            };
        }
    }

    /**
     * Validate that a buffer is a WAV file in the format Azure Speech
     * expects: PCM, mono, 16 kHz, 16-bit.
     *
     * NOTE(review): field offsets assume the canonical 44-byte layout where
     * the "fmt " chunk starts at byte 12 and "data" follows immediately;
     * files with extra chunks (e.g. LIST/INFO) would be misread here —
     * confirm the uploader always produces canonical WAV.
     *
     * @param {Buffer} buffer - raw file contents.
     * @returns {boolean} true when the file passes all checks.
     * @throws {Error} describing the first failed requirement.
     */
    _validateWavFile(buffer) {
        try {
            // A canonical WAV header is 44 bytes; anything smaller cannot
            // even contain the fields we read below.
            if (buffer.length < 44) {
                throw new Error('Invalid WAV file: too small');
            }

            // Container magic: "RIFF" at offset 0 and "WAVE" at offset 8.
            const riff = buffer.slice(0, 4).toString('ascii');
            const wave = buffer.slice(8, 12).toString('ascii');

            console.log('WAV file headers:', {
                riff,
                wave,
                fileSize: buffer.length
            });

            if (riff !== 'RIFF' || wave !== 'WAVE') {
                throw new Error('Invalid WAV file: not a WAVE file');
            }

            // fmt-chunk fields at their canonical offsets.
            const audioFormat = buffer.readUInt16LE(20);
            const numChannels = buffer.readUInt16LE(22);
            const sampleRate = buffer.readUInt32LE(24);
            const bitsPerSample = buffer.readUInt16LE(34);

            console.log('WAV file format:', {
                audioFormat,
                numChannels,
                sampleRate,
                bitsPerSample
            });

            // Diagnostic summary of which requirement (if any) failed.
            const requirements = {
                format: audioFormat === 1 ? 'OK' : 'Must be PCM format (1)',
                channels: numChannels === 1 ? 'OK' : 'Must be mono (1)',
                sampleRate: sampleRate === 16000 ? 'OK' : 'Must be 16kHz',
                bitsPerSample: bitsPerSample === 16 ? 'OK' : 'Must be 16-bit'
            };

            console.log('Format requirements check:', requirements);

            if (audioFormat !== 1) {
                throw new Error('Invalid WAV file: must be PCM format');
            }
            if (numChannels !== 1) {
                throw new Error('Invalid WAV file: must be mono channel');
            }
            if (sampleRate !== 16000) {
                throw new Error('Invalid WAV file: must be 16kHz sample rate');
            }
            if (bitsPerSample !== 16) {
                throw new Error('Invalid WAV file: must be 16-bit');
            }

            return true;
        } catch (error) {
            console.error('WAV file validation error:', error.message);
            throw error;
        }
    }

    /**
     * Rough word-overlap similarity between two strings, in [0, 1]:
     * (words of text1 also present in text2) / max word count.
     *
     * @param {string} text1
     * @param {string} text2
     * @returns {number} similarity ratio; 1 for two blank strings.
     */
    _calculateTextSimilarity(text1, text2) {
        // Drop empty tokens so leading/trailing whitespace does not create
        // phantom "" words that skew the ratio.
        const words1 = text1.split(/\s+/).filter(Boolean);
        const words2 = text2.split(/\s+/).filter(Boolean);

        const totalWords = Math.max(words1.length, words2.length);
        if (totalWords === 0) {
            return 1; // two blank strings are trivially identical
        }

        // Set lookup instead of O(n^2) Array.includes scans.
        const wordSet2 = new Set(words2);
        let matches = 0;
        for (const word of words1) {
            if (wordSet2.has(word)) {
                matches++;
            }
        }

        return matches / totalWords;
    }

    /**
     * Exchange the subscription key for a short-lived bearer token via the
     * regional STS endpoint. No caching is done here; callers requiring
     * many tokens should cache (Azure tokens expire after ~10 minutes).
     *
     * @returns {Promise<string>} the raw access token.
     * @throws {Error} when the token request fails.
     */
    async _getAccessToken() {
        try {
            const tokenEndpoint = `https://${this.region}.api.cognitive.microsoft.com/sts/v1.0/issueToken`;
            console.log('Token endpoint:', tokenEndpoint);

            const response = await axios.post(tokenEndpoint, null, {
                headers: {
                    'Ocp-Apim-Subscription-Key': this.subscriptionKey,
                    'Content-Type': 'application/x-www-form-urlencoded'
                }
            });

            if (response.status === 200) {
                const token = response.data;
                console.log('Token retrieved successfully');
                return token;
            } else {
                throw new Error(`Failed to get token. Status: ${response.status}`);
            }
        } catch (error) {
            console.error('Error getting access token:', error.message);
            if (error.response) {
                console.error('Token Error Details:', {
                    status: error.response.status,
                    data: error.response.data
                });
            }
            throw new Error('Failed to get access token: ' + error.message);
        }
    }

    /**
     * Synthesize `text` to a WAV file under public/uploads/audio and return
     * its public URL.
     *
     * NOTE(review): assumes public/uploads/audio exists relative to the
     * process CWD — confirm it is created at startup.
     *
     * @param {string} text - text to synthesize.
     * @param {string} [voiceName='en-US-JennyMultilingualNeural'] - Azure neural voice.
     * @returns {Promise<{success: boolean, audioUrl: string, message: string}>}
     */
    async textToSpeech(text, voiceName = 'en-US-JennyMultilingualNeural') {
        try {
            // 1. Unique output file name.
            const timestamp = Date.now();
            const fileName = `tts-${timestamp}.wav`;
            const filePath = `public/uploads/audio/${fileName}`;

            // 2. Speech configuration.
            const speechConfig = sdk.SpeechConfig.fromSubscription(this.subscriptionKey, this.region);
            speechConfig.speechSynthesisVoiceName = voiceName;

            // 3. Write synthesized audio straight to the file.
            const audioConfig = sdk.AudioConfig.fromAudioFileOutput(filePath);

            // 4. Synthesizer.
            const synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);

            // 5. Run synthesis; the synthesizer is closed on every exit path.
            return new Promise((resolve, reject) => {
                synthesizer.speakTextAsync(
                    text,
                    result => {
                        if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
                            synthesizer.close();
                            resolve({
                                success: true,
                                audioUrl: `/uploads/audio/${fileName}`,
                                message: 'Text-to-speech conversion completed successfully'
                            });
                        } else {
                            const error = new Error(`Speech synthesis failed: ${result.errorDetails}`);
                            synthesizer.close();
                            reject(error);
                        }
                    },
                    error => {
                        synthesizer.close();
                        reject(error);
                    }
                );
            });
        } catch (error) {
            console.error('TTS Error:', error);
            throw error;
        }
    }

    /**
     * Synthesize `text` and return the audio as an in-memory Buffer instead
     * of writing a file.
     *
     * @param {string} text - text to synthesize.
     * @param {string} [voiceName='en-US-JennyMultilingualNeural'] - Azure neural voice.
     * @returns {Promise<Buffer>} the synthesized audio bytes.
     */
    async textToSpeechStream(text, voiceName = 'en-US-JennyMultilingualNeural') {
        try {
            // 1. Speech configuration.
            const speechConfig = sdk.SpeechConfig.fromSubscription(this.subscriptionKey, this.region);
            speechConfig.speechSynthesisVoiceName = voiceName;

            // 2. Pull-stream output so nothing is written to disk.
            const audioStream = sdk.AudioOutputStream.createPullStream();
            const audioConfig = sdk.AudioConfig.fromStreamOutput(audioStream);

            // 3. Synthesizer.
            const synthesizer = new sdk.SpeechSynthesizer(speechConfig, audioConfig);

            // 4. Run synthesis. The speakTextAsync callbacks are the single
            //    completion point: the previous version relied on a
            //    non-existent `synthesisError` event (the JS SDK emits
            //    synthesisStarted / synthesizing / synthesisCompleted /
            //    SynthesisCanceled), so failures could leave the promise
            //    pending forever with the synthesizer unclosed.
            return new Promise((resolve, reject) => {
                const chunks = [];

                synthesizer.synthesisStarted = (s, e) => {
                    console.log(`Synthesis started`);
                };

                synthesizer.synthesizing = (s, e) => {
                    // Accumulate audio as it is produced.
                    chunks.push(Buffer.from(e.result.audioData));
                };

                synthesizer.speakTextAsync(
                    text,
                    result => {
                        synthesizer.close();
                        if (result.reason === sdk.ResultReason.SynthesizingAudioCompleted) {
                            console.log(`Synthesis completed`);
                            // Prefer the streamed chunks; fall back to the
                            // full result payload if no chunk event fired.
                            const audioBuffer = chunks.length > 0
                                ? Buffer.concat(chunks)
                                : Buffer.from(result.audioData);
                            resolve(audioBuffer);
                        } else {
                            reject(new Error(`Synthesis failed: ${result.errorDetails}`));
                        }
                    },
                    error => {
                        synthesizer.close();
                        reject(error);
                    }
                );
            });
        } catch (error) {
            console.error('TTS Stream Error:', error);
            throw error;
        }
    }
}

// Export a shared singleton instance; all consumers reuse the same service.
module.exports = new AzureSpeechService(); 