package llmserviceopenai.service

import io.ktor.client.HttpClient

import io.ktor.client.call.body
import io.ktor.client.engine.cio.CIO
import io.ktor.client.plugins.ClientRequestException
import io.ktor.client.plugins.HttpTimeout
import io.ktor.client.plugins.ServerResponseException
import io.ktor.client.plugins.contentnegotiation.ContentNegotiation
import io.ktor.client.request.header
import io.ktor.client.request.post
import io.ktor.client.request.setBody
import io.ktor.client.statement.HttpResponse
import io.ktor.client.statement.bodyAsText
import io.ktor.http.ContentType
import io.ktor.http.HttpHeaders
import io.ktor.http.contentType
import io.ktor.serialization.kotlinx.json.json
import kotlinx.serialization.json.Json
import llmserviceopenai.model.EmbeddingModelResponse
import llmserviceopenai.model.EmbeddingRequest
import org.slf4j.LoggerFactory

/**
 * Proxies OpenAI-style embedding requests to a vLLM embedding backend.
 *
 * Thin forwarding layer: serializes [EmbeddingRequest] as JSON, POSTs it to the
 * backend with a Bearer token, and deserializes the OpenAI-compatible response.
 * HTTP errors (4xx/5xx) are logged with the backend's error body and rethrown.
 *
 * @param backendUrl embedding endpoint; defaults to the original hard-coded vLLM address.
 * @param apiKey Bearer token sent with each request ("EMPTY" is the usual vLLM placeholder).
 */
class EmbeddingService(
    private val backendUrl: String = "http://10.10.1.166:10012/v1/embeddings",
    private val apiKey: String = "EMPTY",
) {

    private val logger = LoggerFactory.getLogger(EmbeddingService::class.java)

    // Unified JSON configuration, lenient toward backend schema evolution.
    private val json = Json {
        ignoreUnknownKeys = true  // tolerate fields added by the backend without crashing
        explicitNulls = false     // null fields may be omitted on the wire
    }

    private val client = HttpClient(CIO) {
        install(ContentNegotiation) {
            json(json)
        }

        // Timeout configuration.
        // NOTE(review): the original literals were written `600_00` (= 60_000 ms);
        // grouping fixed to `60_000` to preserve that value. If 600_000 ms (10 min)
        // was actually intended, adjust request/socket timeouts here.
        install(HttpTimeout) {
            requestTimeoutMillis = 60_000
            connectTimeoutMillis = 30_000
            socketTimeoutMillis = 60_000
        }
    }

    /**
     * Forwards [request] to the embedding backend and returns its parsed response.
     *
     * Logs the request, the raw response, the embedding dimension of the first
     * result (0 if the backend returned no data), and the round-trip time.
     *
     * @throws ClientRequestException on 4xx responses from the backend.
     * @throws ServerResponseException on 5xx responses from the backend.
     */
    suspend fun embeddingHandler(request: EmbeddingRequest): EmbeddingModelResponse {
        logger.info("Forwarding request to backend: $request")

        try {
            val startTimePoint = System.currentTimeMillis()

            // Issue the request against the real backend.
            val response: HttpResponse = client.post(backendUrl) {
                contentType(ContentType.Application.Json)
                setBody(request)
                header(HttpHeaders.Authorization, "Bearer $apiKey")
            }

            val openAIResponse = response.body<EmbeddingModelResponse>()

            val duration = System.currentTimeMillis() - startTimePoint
            logger.info("Received valid OpenAI-style response: $openAIResponse")
            // firstOrNull(): an (unexpected) empty `data` list must not crash the
            // handler with NoSuchElementException just to log a dimension.
            val dim = openAIResponse.data.firstOrNull()?.embedding?.size ?: 0
            logger.info("Embedding generated (dim=$dim, time=$duration ms)")

            return openAIResponse

        } catch (e: ClientRequestException) {
            // 4xx: surface the backend's error body for diagnosis, then rethrow.
            logger.error("Client error (${e.response.status}): ${e.response.bodyAsText()}")
            throw e
        } catch (e: ServerResponseException) {
            // 5xx: same treatment as client errors.
            logger.error("Server error (${e.response.status}): ${e.response.bodyAsText()}")
            throw e
        } catch (e: Exception) {
            logger.error("Unexpected error while calling backend", e)
            throw e
        }
    }

    /** Releases the underlying HTTP client and its connection resources. */
    fun close() {
        client.close()
    }
}