package com.boulderai.mcp.llm;

import com.boulderai.mcp.model.llm.LlmRequest;
import com.boulderai.mcp.model.llm.LlmResponse;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

import java.time.Duration;
import java.util.Objects;

/**
 * OpenAI API客户端实现
 */
public class OpenAiLlmClient implements LlmClient {
    
    private static final Logger logger = LoggerFactory.getLogger(OpenAiLlmClient.class);
    
    private final WebClient webClient;
    private final ObjectMapper objectMapper;
    private final String apiKey;
    private final String baseUrl;
    
    // 支持的模型列表
    private static final String[] SUPPORTED_MODELS = {
        "gpt-4", "gpt-4-turbo", "gpt-4-turbo-preview",
        "gpt-3.5-turbo", "gpt-3.5-turbo-16k"
    };
    
    public OpenAiLlmClient(String apiKey, String baseUrl) {
        this.apiKey = apiKey;
        this.baseUrl = baseUrl != null ? baseUrl : "https://api.openai.com";
        this.objectMapper = new ObjectMapper();
        
        this.webClient = WebClient.builder()
                .baseUrl(this.baseUrl)
                .defaultHeader(HttpHeaders.AUTHORIZATION, "Bearer " + apiKey)
                .defaultHeader(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_JSON_VALUE)
                .defaultHeader(HttpHeaders.USER_AGENT, "BoulderAI-MCP-Client/1.0")
                .codecs(configurer -> configurer.defaultCodecs().maxInMemorySize(10 * 1024 * 1024)) // 10MB
                .build();
    }
    
    @Override
    public Mono<LlmResponse> chatCompletion(LlmRequest request) {
        logger.debug("Sending chat completion request to OpenAI: model={}, messages={}", 
                request.getModel(), request.getMessages().size());
        
        // 确保不是流式请求
        request.setStream(false);
        
        return webClient.post()
                .uri("/v1/chat/completions")
                .bodyValue(request)
                .retrieve()
                .bodyToMono(LlmResponse.class)
                .timeout(Duration.ofSeconds(60))
                .doOnSuccess(response -> logger.debug("Received response from OpenAI: id={}", response.getId()))
                .doOnError(error -> logger.error("Error calling OpenAI API", error));
    }
    
    @Override
    public Flux<LlmResponse> chatCompletionStream(LlmRequest request) {
        logger.debug("Sending streaming chat completion request to OpenAI: model={}, messages={}", 
                request.getModel(), request.getMessages().size());
        
        // 确保是流式请求
        request.setStream(true);
        
        return webClient.post()
                .uri("/v1/chat/completions")
                .bodyValue(request)
                .retrieve()
                .bodyToFlux(String.class)
                .filter(line -> line.startsWith("data: ") && !line.equals("data: [DONE]"))
                .map(line -> line.substring(6)) // 移除 "data: " 前缀
                .filter(json -> !json.trim().isEmpty())
                .flatMap(json -> {
                    try {
                        LlmResponse response = objectMapper.readValue(json, LlmResponse.class);
                        return Mono.just(response);
                    } catch (Exception e) {
                        logger.warn("Failed to parse streaming response: {}", json, e);
                        return Mono.empty();
                    }
                })
                .timeout(Duration.ofSeconds(120))
                .doOnComplete(() -> logger.debug("Streaming response completed"))
                .doOnError(error -> logger.error("Error in streaming OpenAI API call", error));
    }
    
    @Override
    public boolean isAvailable() {
        try {
            // 简单的健康检查 - 尝试获取模型列表
            webClient.get()
                    .uri("/v1/models")
                    .retrieve()
                    .bodyToMono(String.class)
                    .timeout(Duration.ofSeconds(5))
                    .block();
            return true;
        } catch (Exception e) {
            logger.warn("OpenAI API health check failed", e);
            return false;
        }
    }
    
    @Override
    public Mono<String[]> getSupportedModels() {
        return Mono.just(SUPPORTED_MODELS);
    }
    
    @Override
    public String getClientType() {
        return "openai";
    }
    
    /**
     * 构建器模式
     */
    public static class Builder {
        private String apiKey;
        private String baseUrl;
        
        public Builder apiKey(String apiKey) {
            this.apiKey = apiKey;
            return this;
        }
        
        public Builder baseUrl(String baseUrl) {
            this.baseUrl = baseUrl;
            return this;
        }
        
        public OpenAiLlmClient build() {
            if (apiKey == null || apiKey.trim().isEmpty()) {
                throw new IllegalArgumentException("API key is required");
            }
            return new OpenAiLlmClient(apiKey, baseUrl);
        }
    }
    
    public static Builder builder() {
        return new Builder();
    }
}