package com.langchain4j.core.llm.provider.impl;

import com.langchain4j.core.llm.provider.LLMProvider;
import com.langchain4j.core.llm.model.*;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.model.output.Response;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.time.Duration;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

/**
 * OpenAI implementation of {@link LLMProvider} backed by langchain4j's
 * {@code OpenAiChatModel} and {@code OpenAiEmbeddingModel}.
 *
 * <p>Chat models are built per request because generation parameters
 * (model name, temperature, topP, penalties) come from the caller-supplied
 * {@link GenerationConfig}. The previous implementation cached the first
 * model it built, which silently ignored every later caller's config.
 * The embedding model has no per-call parameters and is cached lazily.
 *
 * <p>NOTE(review): {@code CompletableFuture.supplyAsync} without an executor
 * runs blocking HTTP calls on {@code ForkJoinPool.commonPool()}; consider
 * injecting a dedicated executor for I/O-bound work.
 */
@Slf4j
@Component("openaiProvider")
public class OpenAIProvider implements LLMProvider {

    @Value("${langchain4j.openai.api-key}")
    private String apiKey;

    @Value("${langchain4j.openai.base-url:https://api.openai.com/v1}")
    private String baseUrl;

    // Request timeout in seconds for both chat and embedding calls.
    @Value("${langchain4j.openai.timeout:30}")
    private Integer timeout;

    @Value("${langchain4j.openai.model:gpt-3.5-turbo}")
    private String defaultModel;

    @Value("${langchain4j.openai.embedding-model:text-embedding-ada-002}")
    private String defaultEmbeddingModel;

    // Lazily created, config-independent; access only via getEmbeddingModel().
    private volatile OpenAiEmbeddingModel embeddingModel;

    /**
     * Generates a completion for the given prompt.
     *
     * @param prompt user prompt text
     * @param config generation settings; a null model name falls back to the
     *               configured default. NOTE(review): sampling parameters
     *               (temperature, topP, penalties) are assumed non-null here,
     *               as in the original code — confirm GenerationConfig's
     *               defaults guarantee this.
     * @return future completing with a success or failure {@link GenerationResult};
     *         provider errors are captured in the result, not thrown
     */
    @Override
    public CompletableFuture<GenerationResult> generate(String prompt, GenerationConfig config) {
        return CompletableFuture.supplyAsync(() -> {
            // Resolve once so success and failure paths report the same model name.
            String modelName = resolveModelName(config);
            try {
                OpenAiChatModel model = buildChatModel(config);

                String systemPrompt = config.getSystemPrompt();

                Response<String> response;
                if (systemPrompt != null && !systemPrompt.trim().isEmpty()) {
                    // System prompt present: send an explicit message pair.
                    response = model.generate(Arrays.asList(
                            dev.langchain4j.data.message.SystemMessage.from(systemPrompt),
                            dev.langchain4j.data.message.UserMessage.from(prompt)
                    ));
                } else {
                    response = model.generate(prompt);
                }

                String content = response.content();

                // Token usage is a rough character-based estimate, not a tokenizer count.
                GenerationResult.TokenUsage tokenUsage = GenerationResult.TokenUsage.builder()
                        .promptTokens(estimateTokens(prompt))
                        .completionTokens(estimateTokens(content))
                        .build();

                return GenerationResult.success(content, modelName, tokenUsage);

            } catch (Exception e) {
                log.error("OpenAI generation failed", e);
                return GenerationResult.failure(e.getMessage(), modelName);
            }
        });
    }

    /**
     * Embeds a single text using the configured embedding model.
     *
     * @param text text to embed
     * @return future completing with a success or failure {@link EmbeddingResult};
     *         provider errors are captured in the result, not thrown
     */
    @Override
    public CompletableFuture<EmbeddingResult> embed(String text) {
        return CompletableFuture.supplyAsync(() -> {
            try {
                OpenAiEmbeddingModel model = getEmbeddingModel();
                Response<dev.langchain4j.model.embedding.Embedding> response = model.embed(text);

                List<Float> embedding = response.content().vector();
                // Estimated, not an exact tokenizer count.
                Integer tokenCount = estimateTokens(text);

                return EmbeddingResult.success(embedding, defaultEmbeddingModel, text, tokenCount);

            } catch (Exception e) {
                log.error("OpenAI embedding failed", e);
                return EmbeddingResult.failure(e.getMessage(), defaultEmbeddingModel, text);
            }
        });
    }

    /**
     * Describes the default chat model exposed by this provider.
     *
     * <p>NOTE(review): contextLength and pricing are hard-coded for
     * gpt-3.5-turbo even when {@code defaultModel} is overridden via
     * configuration — verify whether they should track the configured model.
     */
    @Override
    public ModelInfo getModelInfo() {
        return ModelInfo.builder()
                .id("openai-" + defaultModel)
                .name(defaultModel)
                .providerType("openai")
                .modelType(ModelInfo.ModelType.CHAT)
                .contextLength(4096) // gpt-3.5-turbo context window
                .supportedParameters(Arrays.asList("temperature", "max_tokens", "top_p", "frequency_penalty", "presence_penalty"))
                .capabilities(Arrays.asList(
                        ModelInfo.ModelCapability.TEXT_GENERATION,
                        ModelInfo.ModelCapability.CODE_GENERATION,
                        ModelInfo.ModelCapability.TRANSLATION,
                        ModelInfo.ModelCapability.SUMMARIZATION,
                        ModelInfo.ModelCapability.QUESTION_ANSWERING
                ))
                .pricing(ModelInfo.PricingInfo.builder()
                        .inputPricePer1kTokens(0.0015)
                        .outputPricePer1kTokens(0.002)
                        .currency("USD")
                        .billingUnit("per_1k_tokens")
                        .build())
                .status(ModelInfo.ModelStatus.ACTIVE)
                .build();
    }

    /**
     * Probes availability by issuing a minimal real generation request.
     *
     * <p>NOTE(review): this makes a billable API call on every invocation;
     * consider rate-limiting or caching the result if called frequently.
     *
     * @return future resolving to {@code true} when the API answered with content
     */
    @Override
    public CompletableFuture<Boolean> isAvailable() {
        return CompletableFuture.supplyAsync(() -> {
            try {
                OpenAiChatModel model = buildChatModel(GenerationConfig.defaultConfig());
                Response<String> response = model.generate("test");
                return response != null && response.content() != null;
            } catch (Exception e) {
                log.error("OpenAI availability check failed", e);
                return false;
            }
        });
    }

    @Override
    public String getProviderType() {
        return "openai";
    }

    /** Returns the caller-requested model name, falling back to the configured default. */
    private String resolveModelName(GenerationConfig config) {
        return config.getModelName() != null ? config.getModelName() : defaultModel;
    }

    /**
     * Builds a chat model for ONE request from the given config.
     *
     * <p>Deliberately not cached: the sampling parameters differ per config,
     * and caching the first build made every subsequent request reuse the
     * first caller's settings.
     */
    private OpenAiChatModel buildChatModel(GenerationConfig config) {
        return OpenAiChatModel.builder()
                .apiKey(apiKey)
                .baseUrl(baseUrl)
                .modelName(resolveModelName(config))
                .temperature(config.getTemperature().floatValue())
                .maxTokens(config.getMaxTokens())
                .topP(config.getTopP().floatValue())
                .frequencyPenalty(config.getFrequencyPenalty().floatValue())
                .presencePenalty(config.getPresencePenalty().floatValue())
                .timeout(Duration.ofSeconds(timeout))
                .build();
    }

    /**
     * Lazily creates the shared embedding model. Synchronized because embed()
     * runs on ForkJoinPool threads and the unguarded null-check was racy.
     */
    private synchronized OpenAiEmbeddingModel getEmbeddingModel() {
        if (embeddingModel == null) {
            embeddingModel = OpenAiEmbeddingModel.builder()
                    .apiKey(apiKey)
                    .baseUrl(baseUrl)
                    .modelName(defaultEmbeddingModel)
                    .timeout(Duration.ofSeconds(timeout))
                    .build();
        }
        return embeddingModel;
    }

    /**
     * Rough token estimate: ~4 characters per token. Not a tokenizer —
     * adequate only for approximate usage reporting.
     *
     * @param text text to estimate; null yields 0
     * @return estimated token count
     */
    private Integer estimateTokens(String text) {
        if (text == null) {
            return 0;
        }
        return text.length() / 4;
    }
}