<template>
    <!-- Voice-to-text widget: textarea bound via v-model plus recording/upload/AI-format controls. -->
    <div class="voice-to-text">
        <div class="text-area-container">
            <!-- Extra el-input props (rows, maxlength, ...) pass through via v-bind="$attrs". -->
            <el-input v-model="textContent" ref="txtEl" :placeholder="placeholder" :disabled="isRecording || disabled"
                show-word-limit type="textarea" v-bind="$attrs" />

            <!-- Live indicator with elapsed seconds while recording. -->
            <div class="recording-indicator" v-if="isRecording">
                <span class="recording-dot"></span>
                正在录音... {{ recordingTime }}s
            </div>
        </div>
        <!-- Controls hidden entirely when speech is unavailable (see allowSpeech computed). -->
        <div v-if="allowSpeech" style="display: flex;">
            <!-- File-upload transcription; hidden on iOS (presumably file picker/codec issues — TODO confirm). -->
            <el-button @click="uploadAudioFile" v-if="!isIOS" type="primary" v-loading="isTrans"
                :disabled="isProcessing || isRecording || isFormating || isTrans">
                <el-icon :size="24">
                    <Upload />
                </el-icon>
            </el-button>
            <el-button style="flex: 1;" @click="toggleRecording" type="success"
                :disabled="isProcessing || isFormating || isTrans">
                {{ isRecording ? '停止录音' : '开始录音' }}
            </el-button>
            <el-button style="flex: 1;" v-loading="isFormating" @click="formatText" type="success"
                :disabled="!modelValue || isRecording || isProcessing || isFormating || isTrans">AI整理</el-button>
        </div>
    </div>
</template>

<script>



export default {
    name: 'AudioText',
    props: {
        // Parent form data object; not read anywhere in this component's visible code.
        formData: Object,
        // Disables the textarea and hides all speech controls (see allowSpeech).
        disabled: Boolean,
        placeholder: {
            type: String,
            default: '点击录音按钮开始语音识别...'
        },
        // Maximum recording length in seconds; recording auto-stops when reached.
        maxDuration: {
            type: Number,
            default: 360
        },
        // Aliyun NLS appkey. Marked required, but created() fetches a fallback
        // (appKey2) when empty — NOTE(review): "required" contradicts that fallback.
        appKey: {
            type: String,
            required: true
        },
        // Aliyun NLS access token; same fallback behavior as appKey.
        token: {
            type: String,
            required: true
        },
        // Declared but not read in the visible code — presumably a sentence-gap
        // timeout in ms for the recognizer; TODO confirm before relying on it.
        sentenceTimeout: {
            type: Number,
            default: 600
        },
        // v-model binding for the recognized/edited text.
        modelValue: {
            type: String,
            default: ''
        },
        // Declared but not read in the visible code.
        aiFormat: Boolean,
    },
    emits: ['update:modelValue', 'error'],
    data() {
        return {
            completedText: '',              // sentences the recognizer has finalized
            currentText: '',                // live (intermediate) hypothesis for the current sentence
            isRecording: false,             // whether recording is in progress
            isProcessing: false,            // whether a recognition result is being processed
            audioContext: null,             // Web Audio context (16 kHz)
            scriptProcessor: null,          // ScriptProcessorNode converting float32 -> PCM16
            audioInput: null,               // MediaStreamSource for the microphone
            audioStream: null,              // raw getUserMedia stream
            recordingTime: 0,               // elapsed seconds, shown in the indicator
            recordingTimer: null,           // setInterval handle for recordingTime
            ws: null,                       // WebSocket to the Aliyun NLS gateway
            taskId: this.generateTaskId(),  // NLS task id (regenerated/reused by the command senders)
            messageId: this.generateMessageId(),
            readyToSend: false,             // true only between TranscriptionStarted and completion
            isFormating: false,             // whether the AI text-cleanup call is running
            token2: "",                     // fallback token fetched in created()
            appKey2: "",                    // fallback appkey fetched in created()
            isTrans: false,                 // whether a file upload/transcription is running
            isIOS:false,                    // detected in created(); hides the upload button
        }
    },
    // Detect iOS and, when appKey/token props are missing, load fallback speech
    // credentials — first from sessionStorage, otherwise from the server (cached
    // afterwards as "appkey|token").
    async created() {
        this.isIOS = /iP(hone|od|ad)/i.test(navigator.userAgent);
        if (!this.appKey || !this.token) {
            const speech_setting = "speech_setting";
            let speech = sessionStorage.getItem(speech_setting)
            if (speech) {
                // Cached format is "SpeechAppKey|SpeechToken".
                const ss = speech.split("|");
                speech = {
                    SpeechAppKey: ss[0],
                    SpeechToken: ss[1]
                }
            } else {
                //speech = await $server.call("/Common/GetSysSetting",{key:"SpeechAppKey;SpeechToken"})
                // NOTE(review): no error handling — a failed call leaves speech
                // undefined and the lines below will throw.
                speech = await $server.call("/Speech/getConfig", { key: "SpeechAppKey;SpeechToken" })
                sessionStorage.setItem(speech_setting, speech.SpeechAppKey + "|" + speech.SpeechToken)
            }
            this.appKey2 = speech.SpeechAppKey;
            this.token2 = speech.SpeechToken;
        }
    },
    computed: {
        // Two-way proxy for v-model: read the prop, write via update:modelValue.
        textContent: {
            get() {
                return this.modelValue;
            },
            set(value) {
                this.$emit('update:modelValue', value);
            }
        },
        // Speech controls are shown only when the component is enabled and an
        // appkey is available (prop or the fallback fetched in created()).
        allowSpeech() {
            return !this.disabled && (this.appKey || this.appKey2);
        }
    },
    watch: {
        modelValue: {
            immediate: true,
            handler(newValue) {
                // External edits to modelValue must resync the internal split of
                // completed vs in-progress text.
                if (newValue !== this.completedText + this.currentText) {
                    // Treat externally-set text entirely as already-completed text.
                    this.completedText = newValue;
                    this.currentText = '';
                }
            }
        }
    },
    methods: {
        //上传录音文件识别
        async uploadAudioFile() {
            // this.isTrans = true;
            try {
                const cfg = {
                    url: '/Speech/audioFile2Text',
                    accept: 'audio/*',
                    capture:false
                }
                var res = await $server.upload(cfg);
                if (res.data) {
                    this.textContent = res.data;
                }
            } finally {
                // this.isTrans = false;
            }
        },
        /**
         * Format colloquial text into clean, formal prose using Aliyun DashScope's
         * qwen3-14b model.
         *
         * NOTE(review): this method is not called anywhere in this file —
         * formatText() uses the server-side endpoint instead. Also, it authorizes
         * with this.appKey/appKey2, which elsewhere in this component is the NLS
         * appkey, not a DashScope API key — verify before using.
         *
         * @param {string} text - raw text to be formatted
         * @returns {Promise<string>} the formatted text (trimmed)
         * @throws {Error} on HTTP failure or when the model returns no content
         */
        async formatTextWithQwen3(text) {
            const url = 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation';

            const prompt = `
请将以下口语化文本整理成通顺、正式、结构清晰的文字，保留原意，但去除重复、啰嗦和语气词：
---
${text}
---
请只返回格式化后的文本，不要添加额外说明。`;

            const requestBody = {
                model: 'qwen3-14b',
                input: {
                    messages: [
                        {
                            role: 'user',
                            content: prompt
                        }
                    ]
                },
                parameters: {
                    result_format: 'message'
                }
            };

            try {
                const apiKey = this.appKey || this.appKey2;
                const response = await fetch(url, {
                    method: 'POST',
                    headers: {
                        'Authorization': `Bearer ${apiKey}`,
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify(requestBody)
                });

                if (!response.ok) {
                    const errorData = await response.json().catch(() => ({}));
                    throw new Error(`HTTP ${response.status}: ${errorData.message || response.statusText}`);
                }

                const data = await response.json();

                // Extract the model's reply text from the response envelope.
                const formattedText = data.output?.choices?.[0]?.message?.content;

                if (!formattedText) {
                    throw new Error('模型返回内容为空或格式错误');
                }

                return formattedText.trim();
            } catch (error) {
                console.error('文本格式化失败:', error);
                throw error;
            }
        },
        formatText() {
            if (!this.modelValue) return;
            this.isFormating = true;
            $server.call("/Speech/formatTextUseAi", { text: this.modelValue }).then(res => {
                console.log("res", res.data);
                this.$emit("update:modelValue", res.data);
            }).finally(() => {
                this.isFormating = false;
            })

        },
        generateTaskId() {
            return Array.from(crypto.getRandomValues(new Uint8Array(16)))
                .map(b => b.toString(16).padStart(2, '0'))
                .join('');
        },
        generateMessageId() {
            return Array.from(crypto.getRandomValues(new Uint8Array(16)))
                .map(b => b.toString(16).padStart(2, '0'))
                .join('');
        },
        async toggleRecording() {
            if (this.isRecording) {
                await this.stopRecording();
            } else {
                await this.startRecording();
            }
        },
        async startRecording() {
            try {
                // 初始化WebSocket连接
                await this.initWebSocket();

                // 获取音频输入设备
                this.audioStream = await navigator.mediaDevices.getUserMedia({
                    audio: {
                        sampleRate: 16000,
                        channelCount: 1,
                        echoCancellation: true,
                        noiseSuppression: true
                    }
                });

                // 创建音频上下文
                this.audioContext = new (window.AudioContext || window.webkitAudioContext)({
                    sampleRate: 16000
                });

                // 创建音频源
                this.audioInput = this.audioContext.createMediaStreamSource(this.audioStream);

                // 创建脚本处理器
                this.scriptProcessor = this.audioContext.createScriptProcessor(2048, 1, 1);

                // 处理音频数据
                this.scriptProcessor.onaudioprocess = (event) => {
                    const inputData = event.inputBuffer.getChannelData(0);
                    const inputData16 = new Int16Array(inputData.length);
                    for (let i = 0; i < inputData.length; ++i) {
                        inputData16[i] = Math.max(-1, Math.min(1, inputData[i])) * 0x7FFF; // PCM 16-bit
                    }
                    this.sendAudioData(inputData16.buffer);
                };

                // 连接音频处理节点
                this.audioInput.connect(this.scriptProcessor);
                this.scriptProcessor.connect(this.audioContext.destination);

                // 发送开始指令
                this.sendStartCommand();

                this.isRecording = true;
                this.startTimer();
            } catch (error) {
                $msg.error('录音启动失败:' + error);
                this.$emit('error', '无法访问麦克风');
            }
        },

        async initWebSocket() {
            return new Promise((resolve, reject) => {
                const token = this.token || this.token2;
                const appKey = this.appKey || this.appKey2;
                this.ws = new WebSocket(`wss://nls-gateway.aliyuncs.com/ws/v1?token=${token}`);

                this.ws.onopen = () => {
                    console.log('WebSocket连接已建立');
                    resolve();
                };

                this.ws.onmessage = (event) => {
                    const response = JSON.parse(event.data);
                    this.handleWebSocketMessage(response);
                };

                this.ws.onerror = (error) => {
                    console.error('WebSocket错误:', error);
                    $msg.error('连接语音服务失败，请检查AppKey，Token设置是否正确');
                    this.$emit('error', 'WebSocket连接错误');
                    reject(error);
                };

                this.ws.onclose = () => {
                    console.log('WebSocket连接已关闭');
                    this.readyToSend = false;
                };
            });
        },

        sendStartCommand() {
            const startCommand = {
                header: {
                    message_id: this.generateMessageId(),
                    task_id: this.taskId,
                    namespace: "SpeechTranscriber",
                    name: "StartTranscription",
                    appkey: this.appKey || this.appKey2
                },
                payload: {
                    format: "pcm",
                    sample_rate: 16000,
                    enable_intermediate_result: true,
                    enable_punctuation_prediction: true,
                    enable_inverse_text_normalization: true,
                    text_polish_enabled: true,
                }
            };

            this.readyToSend = false;
            this.ws.send(JSON.stringify(startCommand));
        },

        sendStopCommand() {
            const stopCommand = {
                header: {
                    message_id: this.generateMessageId(),
                    task_id: this.taskId,
                    namespace: "SpeechTranscriber",
                    name: "StopTranscription",
                    appkey: this.appKey || this.appKey2
                }
            };

            this.ws.send(JSON.stringify(stopCommand));
        },

        sendAudioData(audioData) {
            if (this.ws && this.ws.readyState === WebSocket.OPEN && this.readyToSend) {
                this.ws.send(audioData);
            }
        },

        handleWebSocketMessage(response) {
            const { header, payload } = response;

            switch (header.name) {
                case 'TranscriptionStarted':
                    console.log('语音识别已开始');
                    this.readyToSend = true;
                    break;
                case 'TranscriptionResultChanged':
                    if (payload.result) {
                        this.processText(payload.result);
                    }
                    break;
                case 'SentenceEnd':
                    if (payload.result) {
                        this.processText(payload.result);
                        this.completeCurrentSentence();
                    }
                    break;
                case 'TranscriptionCompleted':
                    console.log('语音识别完成');
                    this.readyToSend = false;
                    break;
                case 'SentenceBegin':
                    break;
                default:
                    console.log('收到未知消息:', response);
            }
        },

        processText(newText, SentenceEnd) {
            if (!newText) return;

            this.currentText = newText;

            this.textContent = this.completedText + this.currentText;
        },


        completeCurrentSentence() {
            if (this.currentText) {
                // 添加当前语句到已完成文本
                this.completedText = this.completedText + this.currentText;
                this.currentText = '';
                this.textContent = this.completedText;

            }
        },

        async stopRecording() {
            if (!this.isRecording) return;

            this.isRecording = false;
            this.stopTimer();

            if (this.scriptProcessor) {
                this.scriptProcessor.disconnect();
            }
            if (this.audioInput) {
                this.audioInput.disconnect();
            }
            if (this.audioStream) {
                this.audioStream.getTracks().forEach(track => track.stop());
            }
            if (this.audioContext) {
                this.audioContext.close();
            }

            this.sendStopCommand();

            if (this.ws) {
                this.ws.close();
            }

            // 完成当前语句
            this.completeCurrentSentence();
        },

        startTimer() {
            this.recordingTime = 0;
            this.recordingTimer = setInterval(() => {
                this.recordingTime++;
                if (this.recordingTime >= this.maxDuration) {
                    this.stopRecording();
                }
            }, 1000);
        },

        stopTimer() {
            if (this.recordingTimer) {
                clearInterval(this.recordingTimer);
                this.recordingTimer = null;
            }
        }
    },
    beforeUnmount() {
        this.stopRecording();
        this.stopTimer();
        if (this.ws) {
            this.ws.close();
        }
    }
}
</script>

<style scoped>
/* Root layout: textarea stacked above the button row. */
.voice-to-text {
    display: flex;
    flex-direction: column;
    gap: 1rem;
    margin: 0 auto;
}

/* Anchor for the absolutely-positioned recording indicator. */
.text-area-container {
    position: relative;
}

/* NOTE(review): .text-area is not referenced in the template (the input is an
   el-input); likely dead CSS left from a plain-textarea version. */
.text-area {
    width: 100%;
    min-height: 150px;
    padding: 1rem;
    border: 1px solid #ddd;
    border-radius: 4px;
    font-size: 1rem;
    resize: vertical;
}

/* "Recording..." badge overlaid on the textarea's top-right corner. */
.recording-indicator {
    position: absolute;
    top: 0.5rem;
    right: 0.5rem;
    display: flex;
    align-items: center;
    gap: 0.5rem;
    color: #ff4444;
    font-size: 0.9rem;
}

/* Pulsing red dot inside the indicator. */
.recording-dot {
    width: 8px;
    height: 8px;
    background-color: #ff4444;
    border-radius: 50%;
    animation: pulse 1s infinite;
}

/* NOTE(review): .record-button and its variants below are not referenced in the
   template (el-button is used); likely dead CSS from an earlier custom button. */
.record-button {
    padding: 0.75rem 1.5rem;
    background-color: #4CAF50;
    color: white;
    border: none;
    border-radius: 4px;
    cursor: pointer;
    font-size: 1rem;
    transition: all 0.3s;
    width: 120px;
    margin: 0 auto;
}

.record-button:hover {
    background-color: #45a049;
    transform: scale(1.05);
}

.record-button.recording {
    background-color: #ff4444;
}

.record-button.recording:hover {
    background-color: #cc0000;
}

.record-button:disabled {
    background-color: #cccccc;
    cursor: not-allowed;
    transform: none;
}

/* Scale/fade pulse used by .recording-dot. */
@keyframes pulse {
    0% {
        transform: scale(1);
        opacity: 1;
    }

    50% {
        transform: scale(1.2);
        opacity: 0.7;
    }

    100% {
        transform: scale(1);
        opacity: 1;
    }
}
</style>