package com.coai.ai.client

import com.theokanning.openai.completion.chat.ChatCompletionRequest
import com.theokanning.openai.completion.chat.ChatMessage
import com.theokanning.openai.service.OpenAiService
import org.springframework.beans.factory.annotation.Value
import org.springframework.stereotype.Component
import java.time.Duration

@Component
class OpenAIClient(
    @Value("\${openai.api-key}")
    private val apiKey: String,

    @Value("\${openai.model:gpt-4}")
    private val model: String = "gpt-4"
) {

    // Lazily constructed so the SDK client (and its HTTP machinery) is only
    // built on first use, not at Spring context startup.
    private val service by lazy {
        OpenAiService(apiKey, Duration.ofSeconds(REQUEST_TIMEOUT_SECONDS))
    }

    /**
     * Single-turn chat completion.
     *
     * NOTE(review): this function is `suspend`, but the underlying SDK call is
     * blocking HTTP. Callers should run it on an I/O-appropriate dispatcher
     * (e.g. `withContext(Dispatchers.IO)`) — confirm the project's coroutine
     * setup; it is not main-safe as written.
     *
     * @param prompt user message content
     * @param systemPrompt optional system message prepended before the user message
     * @param temperature sampling temperature, forwarded to the API unchanged
     * @param maxTokens completion token cap, forwarded to the API unchanged
     * @return content of the first choice, or "" when the API returns no choices
     */
    suspend fun chat(
        prompt: String,
        systemPrompt: String? = null,
        temperature: Double = 0.7,
        maxTokens: Int = 2000
    ): String {
        val request = ChatCompletionRequest.builder()
            .model(model)
            .messages(buildMessages(prompt, systemPrompt))
            .temperature(temperature)
            .maxTokens(maxTokens)
            .build()

        val response = service.createChatCompletion(request)
        return response.choices.firstOrNull()?.message?.content ?: ""
    }

    /**
     * Generate an embedding vector for a single text.
     *
     * NOTE(review): blocking SDK call inside a suspend function — same
     * dispatcher caveat as [chat].
     *
     * @param text the text to embed
     * @return the embedding vector, or an empty list when the API returns no data
     */
    suspend fun createEmbedding(text: String): List<Double> {
        val request = com.theokanning.openai.embedding.EmbeddingRequest.builder()
            .model(EMBEDDING_MODEL)
            .input(listOf(text))
            .build()

        val response = service.createEmbeddings(request)
        return response.data.firstOrNull()?.embedding ?: emptyList()
    }

    /**
     * Streaming chat completion for real-time conversation: invokes [onChunk]
     * with each content fragment as it arrives.
     *
     * NOTE(review): `blockingForEach` blocks the calling thread until the
     * stream completes — do not call this from a coroutine or event-loop
     * thread. Temperature is fixed at 0.7 here (unlike [chat]); kept as-is to
     * preserve the existing call sites' positional-argument compatibility.
     *
     * @param prompt user message content
     * @param systemPrompt optional system message prepended before the user message
     * @param onChunk callback invoked once per non-null content fragment
     */
    fun chatStream(
        prompt: String,
        systemPrompt: String? = null,
        onChunk: (String) -> Unit
    ) {
        val request = ChatCompletionRequest.builder()
            .model(model)
            .messages(buildMessages(prompt, systemPrompt))
            .temperature(0.7)
            .stream(true)
            .build()

        service.streamChatCompletion(request)
            .blockingForEach { chunk ->
                // Streamed choices carry incremental deltas; the theokanning
                // SDK maps the JSON "delta" field onto `message`. Content can
                // be null for role-only or terminal chunks, hence the guard.
                chunk.choices.firstOrNull()?.message?.content?.let(onChunk)
            }
    }

    /** Build the (optional system + user) message list shared by [chat] and [chatStream]. */
    private fun buildMessages(prompt: String, systemPrompt: String?): List<ChatMessage> =
        buildList {
            systemPrompt?.let { add(ChatMessage("system", it)) }
            add(ChatMessage("user", prompt))
        }

    companion object {
        /** HTTP timeout for all SDK calls, in seconds. */
        private const val REQUEST_TIMEOUT_SECONDS = 60L

        /** Embedding model used by [createEmbedding]. */
        private const val EMBEDDING_MODEL = "text-embedding-ada-002"
    }
}
