package com.scale.service.voice.service.impl;

import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.scale.service.voice.service.StreamAsrService;
import com.scale.service.voice.service.TaskProgressService;
import io.reactivex.Flowable;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

/**
 * 流式语音识别服务实现类
 * @author crp
 * @since 2025-01-26
 */
@Service
public class StreamAsrServiceImpl implements StreamAsrService {

    /** DashScope API key; defaults to empty so the app can still start when unconfigured. */
    @Value("${dashscope.api-key:}")
    private String dashscopeApiKey;

    /** ASR model name; configurable, defaulting to the previously hard-coded model. */
    @Value("${dashscope.asr-model:qwen3-asr-flash}")
    private String asrModel;

    @Autowired
    private TaskProgressService taskProgressService;

    /**
     * Runs streaming speech recognition over the given audio and pushes incremental
     * progress updates, returning the final accumulated transcript.
     *
     * @param taskId    task identifier used when reporting progress
     * @param audioData audio payload passed as the "audio" content of the user message
     *                  (presumably a URL or base64 data — confirm against callers)
     * @return the last (cumulative) recognized text emitted by the stream; empty string
     *         if the stream produced no text
     * @throws IllegalStateException if no DashScope API key is configured
     * @throws Exception             propagated from the DashScope SDK call
     */
    @Override
    public String processAsrStream(String taskId, String audioData) throws Exception {
        // Fail fast with a clear message instead of an opaque SDK auth error downstream.
        if (dashscopeApiKey == null || dashscopeApiKey.isEmpty()) {
            throw new IllegalStateException(
                "dashscope.api-key is not configured; cannot call streaming ASR");
        }

        MultiModalConversation conv = new MultiModalConversation();

        MultiModalMessage userMessage = MultiModalMessage.builder()
                .role(Role.USER.getValue())
                .content(Collections.singletonList(
                        Collections.singletonMap("audio", audioData)))
                .build();

        // NOTE(review): empty system prompt — appears intentional (model expects the
        // message slot to exist); confirm against DashScope qwen3-asr usage docs.
        MultiModalMessage sysMessage = MultiModalMessage.builder()
                .role(Role.SYSTEM.getValue())
                .content(Collections.singletonList(Collections.singletonMap("text", "")))
                .build();

        // ASR tuning flags: language identification, inverse text normalization,
        // punctuation restoration, and a Chinese language hint.
        Map<String, Object> asrOptions = new HashMap<>();
        asrOptions.put("enable_lid", true);
        asrOptions.put("enable_itn", true);
        asrOptions.put("enable_punctuation", true);
        asrOptions.put("language", "zh");

        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .apiKey(dashscopeApiKey)
                .model(asrModel)
                .message(userMessage)
                .message(sysMessage)
                .parameter("asr_options", asrOptions)
                .build();

        Flowable<MultiModalConversationResult> resultFlowable = conv.streamCall(param);
        // Holds the latest cumulative transcript; AtomicReference because the lambda
        // below needs an effectively-final mutable slot.
        AtomicReference<String> lastRecognizedText = new AtomicReference<>("");

        resultFlowable.blockingForEach(item -> {
            try {
                // 🔧 Null-safety fix: some stream chunks may not carry a "text" entry.
                // The original called toString() before null-checking, so a missing key
                // would NPE inside the lambda; check the raw value first and skip.
                Object textObj = item.getOutput().getChoices().get(0)
                        .getMessage().getContent().get(0).get("text");
                if (textObj == null) {
                    return;
                }
                String currentText = textObj.toString();

                // 🔧 修复重复文本问题：流式ASR返回的是累积文本，不是增量文本
                // 保存当前最新的完整文本，避免重复拼接
                if (!currentText.equals(lastRecognizedText.get())) {
                    lastRecognizedText.set(currentText);
                    System.out.println("🎤 ASR流式更新: " + currentText);

                    // 推送ASR进度 - 使用当前完整文本
                    taskProgressService.updateProgress(taskId, "ASR", 25,
                        "正在识别语音: " + currentText);
                }

            } catch (Exception e) {
                // Preserve the cause so the full stack trace survives the rethrow.
                throw new RuntimeException("ASR处理失败: " + e.getMessage(), e);
            }
        });

        // 返回最终识别的完整文本
        String finalRecognizedText = lastRecognizedText.get();
        System.out.println("✅ ASR最终结果: " + finalRecognizedText);
        return finalRecognizedText;
    }
}
