package bm.com.ai.server.LLModel.strategy;

import bm.com.ai.server.LLModel.config.QwenModelConfiguration;
import bm.com.ai.server.LLModel.constant.ModalityType;
import bm.com.ai.server.LLModel.constant.ModelType;
import bm.com.ai.server.dto.MessageRequest;
import bm.com.ai.server.dto.MessageResponse;
import bm.com.ai.server.rag.service.RagService;
import bm.com.ai.server.service.MultimodalService;
import com.alibaba.dashscope.aigc.generation.Generation;
import com.alibaba.dashscope.aigc.generation.GenerationParam;
import com.alibaba.dashscope.aigc.generation.GenerationResult;
import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.output.Response;
import jakarta.annotation.Resource;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;

import java.util.HashMap;
import java.util.Map;

@Data
@Slf4j
@Component
public class QwenModelStrategy extends AbstractModelStrategy {

    @Resource
    MultimodalService multimodalService;

    @Resource
    QwenModelConfiguration qwenConfig;

    @Resource
    RagService ragService;

    // Built lazily by initialize(); null until the first initialization completes.
    private QwenChatModelWrapper qwenModel;

    /**
     * Identifies which model this strategy serves.
     *
     * @return always {@link ModelType#QWEN}
     */
    @Override
    public ModelType getModelType() {
        return ModelType.QWEN;
    }

    /**
     * Builds the Qwen model wrapper from the injected configuration.
     * Synchronized with an {@code initialized} guard so concurrent callers
     * cannot initialize the model twice.
     */
    @Override
    public synchronized void initialize() {
        if (!initialized) {
            log.info("Initializing Qwen model: {}", qwenConfig.getModelName());

            // Base request parameters derived from configuration; the wrapper
            // copies these into every per-call request.
            GenerationParam param = GenerationParam.builder()
                    .model(qwenConfig.getModelName())
                    .apiKey(qwenConfig.getKey())
                    .temperature((float) qwenConfig.getTemperature())
                    .topP(qwenConfig.getTopP())
                    .maxTokens(qwenConfig.getMaxTokens())
                    .build();

            // Adapt the low-level DashScope API to the LangChain4j interface.
            qwenModel = new QwenChatModelWrapper(param);
            this.initialized = true;
            log.info("Qwen model initialized successfully");
        }
    }

    /**
     * Processes a chat request: resolves multimodal content, optionally
     * augments the prompt with RAG context, calls the Qwen model, and
     * assembles the response with timing and token statistics.
     *
     * @param request the incoming message request
     * @return a fully populated {@link MessageResponse}
     */
    @Override
    protected MessageResponse doProcess(MessageRequest request) {

        long startTime = System.currentTimeMillis();

        // 1. Resolve multimodal inputs (images etc.) into a text representation.
        String processedContent = multimodalService.process(request);

        // 2. Retrieve RAG context if the request opted in.
        String context = "";
        if (Boolean.TRUE.equals(request.getUseRag())) {
            context = ragService.retrieveRelevantContext(processedContent);
            log.info("Qwen RAG context: {}", context);
        }

        // 3. Build the final prompt (context-augmented when RAG is active).
        String prompt = buildPrompt(context, processedContent);

        log.info("Qwen request prompt: {}", prompt);
        // 4. Invoke the model.
        Response<String> response = qwenModel.generate(prompt);

        log.debug("Qwen raw response: {}", response);

        // 5. Estimate token usage for prompt and completion.
        MessageResponse.TokenStats tokenStats = calculateTokenStats(prompt, response.content());

        // 6. Assemble the response payload.
        return MessageResponse.builder()
                .content(response.content())
                .modelUsed(ModelType.QWEN)
                .processingTimeMs(System.currentTimeMillis() - startTime)
                .isSuccessful(true)
                .tokenStats(tokenStats)
                .ragSources(ragService.getLastSources())
                .metadata(buildMetadata(response))
                .build();

    }

    /**
     * Determines whether this strategy can serve the given request.
     * Qwen handles text and image modalities only, and must be enabled
     * in configuration.
     *
     * @param request the request to check
     * @return {@code true} when every attached media item is TEXT or IMAGE
     */
    @Override
    public boolean supports(MessageRequest request) {
        if (!qwenConfig.isEnabled()) {
            return false;
        }

        // Reject the request if any attached media uses an unsupported modality.
        if (request.getMediaContents() != null && !request.getMediaContents().isEmpty()) {
            for (var media : request.getMediaContents()) {
                // Qwen supports text and image only.
                if (media.getType() != ModalityType.TEXT && media.getType() != ModalityType.IMAGE) {
                    return false;
                }
            }
        }

        return true;
    }

    /**
     * Builds the final prompt, prepending retrieved context when available.
     *
     * @param context RAG context (may be null or empty)
     * @param content the user question
     * @return the prompt to send to the model
     */
    private String buildPrompt(String context, String content) {
        if (context == null || context.isEmpty()) {
            return content;
        }
        return "根据以下上下文信息回答问题：\n" +
                "上下文：\n" + context + "\n\n" +
                "问题：" + content;
    }

    /**
     * @return {@code true} when the model is initialized and enabled in configuration
     */
    @Override
    public boolean isHealthy() {
        return initialized && qwenConfig.isEnabled();
    }

    /**
     * Estimates token usage for the exchange.
     * A production implementation should use the model's own tokenizer.
     */
    private MessageResponse.TokenStats calculateTokenStats(String prompt, String response) {
        int promptTokens = estimateTokens(prompt);
        int completionTokens = estimateTokens(response);

        return MessageResponse.TokenStats.builder()
                .promptTokens(promptTokens)
                .completionTokens(completionTokens)
                .totalTokens(promptTokens + completionTokens)
                .build();
    }

    /**
     * Builds response metadata: model name, temperature, and finish reason.
     */
    private Map<String, Object> buildMetadata(Response<String> response) {
        Map<String, Object> metadata = new HashMap<>();
        metadata.put("model", qwenConfig.getModelName());
        metadata.put("temperature", qwenConfig.getTemperature());
        metadata.put("finishReason", response.finishReason());
        return metadata;
    }

    /**
     * Rough token estimate (~4 characters per token). A model-specific
     * tokenizer should replace this in production.
     */
    private int estimateTokens(String text) {
        if (text == null || text.isEmpty()) {
            return 0;
        }
        return (int) Math.ceil(text.length() / 4.0);
    }

    /**
     * Adapts the low-level DashScope {@link Generation} client to the
     * LangChain4j {@link ChatLanguageModel} interface.
     */
    public class QwenChatModelWrapper implements ChatLanguageModel {

        private Generation generation;
        private GenerationParam param;

        public QwenChatModelWrapper(GenerationParam param) {
            this.param = param;
            this.generation = new Generation();
        }

        /**
         * Sends a single prompt to the Qwen API and returns the text response.
         *
         * @param prompt the fully built prompt
         * @return the model's text output wrapped in a LangChain4j {@link Response}
         * @throws RuntimeException when the API call fails or returns an empty result
         */
        public Response<String> generate(String prompt) {
            try {
                log.info("Qwen request: {}", prompt);
                // Copy the configured base parameters into a per-call request.
                // Use the configured model name instead of a hard-coded one so
                // behavior stays consistent with initialize().
                GenerationParam requestParam = GenerationParam.builder()
                        .model(param.getModel())
                        .apiKey(param.getApiKey())
                        .temperature(param.getTemperature())
                        .topP(param.getTopP())
                        .maxTokens(param.getMaxTokens())
                        .prompt(prompt)
                        .build();

                log.debug("Qwen request params: {}", requestParam);
                GenerationResult result = generation.call(requestParam);

                if (result.getOutput() == null || result.getOutput().getText() == null) {
                    throw new RuntimeException("Qwen API returned empty response: " + result);
                }

                return Response.from(result.getOutput().getText());
            } catch (Exception e) {
                // Preserve the original cause for upstream diagnostics.
                throw new RuntimeException("Error calling Qwen API: " + e.getMessage(), e);
            }
        }
    }
}
