package com.autoglm.android.core.llm

import android.util.Log
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.withContext
import okhttp3.MediaType.Companion.toMediaTypeOrNull
import okhttp3.OkHttpClient
import okhttp3.Request
import okhttp3.RequestBody.Companion.toRequestBody
import org.json.JSONArray
import org.json.JSONObject
import java.util.concurrent.TimeUnit

/**
 * Thin client for the Zhipu GLM chat-completion API (GLM大模型服务).
 *
 * All network work runs on [Dispatchers.IO], so the suspend functions are
 * main-safe and may be called from any dispatcher.
 *
 * @param apiKey  API key sent as a Bearer token in the `Authorization` header.
 * @param baseUrl chat-completions endpoint; defaults to the public GLM endpoint.
 */
class LLMService(
    private val apiKey: String,
    private val baseUrl: String = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
) {
    // One shared client: an OkHttpClient owns a connection pool and thread pools,
    // so it should be built once and reused for every call.
    private val client = OkHttpClient.Builder()
        .connectTimeout(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS)
        .readTimeout(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS)
        .writeTimeout(DEFAULT_TIMEOUT_SECONDS, TimeUnit.SECONDS)
        .build()

    /**
     * Generates a text completion for a system/user prompt pair.
     *
     * @param systemPrompt system instruction message
     * @param userPrompt   user message
     * @param temperature  sampling temperature; lower values give more deterministic output
     * @return the assistant message content
     * @throws RuntimeException if the HTTP call fails or the response contains no content
     */
    suspend fun generateText(
        systemPrompt: String,
        userPrompt: String,
        temperature: Double = 0.7
    ): String = withContext(Dispatchers.IO) {
        try {
            val requestJson = buildChatCompletionRequest(
                systemPrompt = systemPrompt,
                userPrompt = userPrompt,
                temperature = temperature
            )

            Log.d(TAG, "Sending request to GLM: $requestJson")

            extractResponseContent(sendRequest(requestJson))
        } catch (e: Exception) {
            // Log-and-rethrow so failures are visible in logcat but still
            // propagate to the caller.
            Log.e(TAG, "Error generating text", e)
            throw e
        }
    }

    /**
     * Generates a text completion for a prompt plus an attached image
     * (multimodal chat request).
     *
     * @param prompt      user prompt text
     * @param base64Image Base64-encoded JPEG image payload (without data-URI prefix)
     * @param temperature sampling temperature
     * @return the assistant message content
     * @throws RuntimeException if the HTTP call fails or the response contains no content
     */
    suspend fun generateWithImage(
        prompt: String,
        base64Image: String,
        temperature: Double = 0.7
    ): String = withContext(Dispatchers.IO) {
        try {
            val requestJson = buildImageChatRequest(
                prompt = prompt,
                base64Image = base64Image,
                temperature = temperature
            )

            extractResponseContent(sendRequest(requestJson))
        } catch (e: Exception) {
            Log.e(TAG, "Error generating text with image", e)
            throw e
        }
    }

    /**
     * Analyzes a user instruction, optionally asking the model for structured
     * (JSON) output. Uses a low temperature (0.3) for more deterministic parsing.
     *
     * @param input            the raw user instruction
     * @param structuredOutput when true, instructs the model to reply with strict JSON
     * @return the model's analysis of the instruction
     * @throws RuntimeException if the HTTP call fails or the response contains no content
     */
    suspend fun analyzeUserInput(
        input: String,
        structuredOutput: Boolean = true
    ): String = withContext(Dispatchers.IO) {
        try {
            val systemPrompt = if (structuredOutput) {
                // trimIndent() strips the source-code indentation so the model
                // does not receive 16 spaces of leading whitespace on every line.
                """
                你是一个精确的指令分析器。分析用户的指令并提取关键信息，返回JSON格式的结构化数据。
                确保JSON格式正确，不要添加额外的解释。如果不确定某个字段的值，该字段设为null。
                """.trimIndent()
            } else {
                "你是一个指令分析助手，分析用户的指令并提取关键信息。"
            }

            generateText(
                systemPrompt = systemPrompt,
                userPrompt = input,
                temperature = 0.3
            )
        } catch (e: Exception) {
            Log.e(TAG, "Error analyzing user input", e)
            throw e
        }
    }

    /**
     * Builds the JSON payload for a plain (text-only) chat-completion request.
     */
    private fun buildChatCompletionRequest(
        systemPrompt: String,
        userPrompt: String,
        temperature: Double
    ): String {
        val messages = JSONArray().apply {
            put(JSONObject().apply {
                put("role", "system")
                put("content", systemPrompt)
            })
            put(JSONObject().apply {
                put("role", "user")
                put("content", userPrompt)
            })
        }

        return JSONObject().apply {
            put("model", MODEL_NAME)
            put("messages", messages)
            put("temperature", temperature)
            put("max_tokens", MAX_TOKENS)
        }.toString()
    }

    /**
     * Builds the JSON payload for a multimodal chat-completion request where the
     * user message carries both a text part and a base64-encoded image part.
     */
    private fun buildImageChatRequest(
        prompt: String,
        base64Image: String,
        temperature: Double
    ): String {
        val messages = JSONArray().apply {
            // System message establishing the vision-assistant role.
            put(JSONObject().apply {
                put("role", "system")
                put("content", "你是一个视觉分析助手，能够分析图像内容并提供准确的描述。")
            })

            // User message: multimodal content is an array of typed parts.
            put(JSONObject().apply {
                put("role", "user")
                put("content", JSONArray().apply {
                    // Text part.
                    put(JSONObject().apply {
                        put("type", "text")
                        put("text", prompt)
                    })

                    // Image part, embedded as a data URI.
                    put(JSONObject().apply {
                        put("type", "image_url")
                        put("image_url", JSONObject().apply {
                            put("url", "data:image/jpeg;base64,$base64Image")
                        })
                    })
                })
            })
        }

        return JSONObject().apply {
            put("model", MODEL_NAME)
            put("messages", messages)
            put("temperature", temperature)
            put("max_tokens", MAX_TOKENS)
        }.toString()
    }

    /**
     * POSTs the given JSON payload to the GLM API and returns the raw response body.
     *
     * @throws RuntimeException on a non-2xx status code or an empty response body
     */
    private suspend fun sendRequest(requestJson: String): String = withContext(Dispatchers.IO) {
        val mediaType = "application/json; charset=utf-8".toMediaTypeOrNull()
        val requestBody = requestJson.toRequestBody(mediaType)

        val request = Request.Builder()
            .url(baseUrl)
            .addHeader("Authorization", "Bearer $apiKey")
            .post(requestBody)
            .build()

        // use { } guarantees the Response (and its pooled connection) is closed
        // on every path — the previous code leaked it, which OkHttp forbids.
        client.newCall(request).execute().use { response ->
            val body = response.body?.string()
            if (!response.isSuccessful) {
                throw RuntimeException(
                    "API request failed with code ${response.code}: ${body ?: "Unknown error"}"
                )
            }
            body ?: throw RuntimeException("Empty response body")
        }
    }

    /**
     * Extracts the assistant message content from a chat-completion response body.
     *
     * Handles both shapes: an `error` object (converted to a [RuntimeException])
     * and a normal `choices[0].message.content` payload.
     *
     * @throws RuntimeException if the response reports an error or has no choices
     */
    private fun extractResponseContent(responseBody: String): String {
        try {
            val jsonResponse = JSONObject(responseBody)

            // API-level errors come back as an "error" object with 2xx status.
            if (jsonResponse.has("error")) {
                val error = jsonResponse.getJSONObject("error")
                val message = error.optString("message", "Unknown error")
                val type = error.optString("type", "unknown")
                throw RuntimeException("API error: $type - $message")
            }

            val choices = jsonResponse.getJSONArray("choices")
            if (choices.length() > 0) {
                val choice = choices.getJSONObject(0)
                val message = choice.getJSONObject("message")
                // NOTE: a present-but-empty "content" field yields "" rather than
                // an error; callers treating "" as failure should check explicitly.
                return message.optString("content", "")
            }

            throw RuntimeException("No content found in response")
        } catch (e: Exception) {
            // Include the raw body so malformed responses can be diagnosed.
            Log.e(TAG, "Error parsing response: $responseBody", e)
            throw e
        }
    }

    companion object {
        private const val TAG = "LLMService"
        private const val MODEL_NAME = "glm-4-plus"
        private const val MAX_TOKENS = 4096
        private const val DEFAULT_TIMEOUT_SECONDS = 30L
    }
}