package org.gwh.airagknowledge.core.llm.impl;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.gwh.airagknowledge.core.llm.LlmService;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestTemplate;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * ChatGLM模型服务实现
 */
@Slf4j
@Service
public class ChatGlmLlmService implements LlmService {

    private final RestTemplate restTemplate;
    private final String apiKey;
    private final String baseUrl;
    private final String promptTemplate;
    private final String embeddingModel;
    private double temperature = 0.1;
    private final ObjectMapper objectMapper;

    public ChatGlmLlmService(
            @Value("${llm.chatglm.api-key}") String apiKey,
            @Value("${llm.chatglm.base-url}") String baseUrl,
            @Value("${llm.chatglm.embedding-model}") String embeddingModel,
            @Value("${llm.prompt.template}") String promptTemplate) {
        this.apiKey = apiKey;
        this.baseUrl = baseUrl;
        this.embeddingModel = embeddingModel;
        this.promptTemplate = promptTemplate;
        this.restTemplate = new RestTemplate();
        this.objectMapper = new ObjectMapper();
        
        log.info("ChatGLM LLM service initialized with model: {}", embeddingModel);
    }

    @Override
    public String generateAnswer(String question, String context) {
        try {
            log.info("Generating answer with ChatGLM for question: {}", question);
            
            // 构建提示模板
            String prompt = promptTemplate
                    .replace("{context}", context)
                    .replace("{question}", question);
            
            // 准备请求头
            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.APPLICATION_JSON);
            headers.set("Authorization", "Bearer " + apiKey);
            
            // 准备请求体
            Map<String, Object> requestBody = new HashMap<>();
            requestBody.put("model", "chatglm_turbo");
            requestBody.put("temperature", temperature);
            
            List<Map<String, String>> messages = new ArrayList<>();
            Map<String, String> userMessage = new HashMap<>();
            userMessage.put("role", "user");
            userMessage.put("content", prompt);
            messages.add(userMessage);
            
            requestBody.put("messages", messages);
            
            // 创建请求实体
            HttpEntity<Map<String, Object>> requestEntity = new HttpEntity<>(requestBody, headers);
            
            // 发送请求
            ResponseEntity<String> response = restTemplate.postForEntity(
                    baseUrl + "/v1/chat/completions", 
                    requestEntity, 
                    String.class
            );
            
            // 解析响应
            Map<String, Object> responseMap = objectMapper.readValue(response.getBody(), Map.class);
            List<Map<String, Object>> choices = (List<Map<String, Object>>) responseMap.get("choices");
            Map<String, Object> message = (Map<String, Object>) choices.get(0).get("message");
            String answer = (String) message.get("content");
            
            log.info("Generated answer of length: {}", answer.length());
            return answer;
        } catch (Exception e) {
            log.error("Error generating answer with ChatGLM", e);
            return "Sorry, I encountered an error while trying to answer your question with ChatGLM.";
        }
    }

    @Override
    public void setTemperature(double temperature) {
        this.temperature = temperature;
        log.info("Updated temperature to {} for ChatGLM model", temperature);
    }

    @Override
    public String getModelName() {
        return "chatglm:glm-4-flashx";
    }
} 