using LangChainServer.Models;
using Microsoft.Extensions.Configuration;
using LangChain.Abstractions;
using LangChain.Providers.OpenAI;
using Microsoft.Extensions.Logging;

namespace LangChainServer.Services
{
    /// <summary>
    /// Routes chat requests either to the LangChain OpenAI provider (official
    /// OpenAI endpoint) or to an OpenAI-compatible HTTP backend such as
    /// SiliconFlow/DeepSeek via <see cref="IChatService"/>.
    /// </summary>
    public class LangChainService : ILangChainService
    {
        private readonly IConfiguration _config;
        private readonly ILogger<LangChainService> _logger;
        private readonly IChatService _chatService;

        // Simple in-memory store for conversation history by SessionId.
        // Demo-only: not persisted and not shared across instances; use an
        // external store (DB/cache) in production or multi-instance setups.
        // Guarded by _memoryGate because requests may run concurrently.
        private static readonly Dictionary<string, List<ChatMessage>> _memories = new();
        private static readonly object _memoryGate = new();

        public LangChainService(IConfiguration config, ILogger<LangChainService> logger, IChatService chatService)
        {
            _config = config;
            _logger = logger;
            _chatService = chatService;
        }

        /// <summary>
        /// Handles a single (stateless) chat turn. Routes to the HTTP path for
        /// non-official-OpenAI endpoints/models, otherwise to the LangChain provider.
        /// </summary>
        /// <param name="request">Chat request; <see cref="LangChainChatRequest.Model"/> is optional.</param>
        /// <param name="ct">Cancellation token for the async operation.</param>
        /// <returns>The assistant reply and the model actually used.</returns>
        /// <exception cref="InvalidOperationException">When no model can be resolved.</exception>
        public async Task<LangChainChatResponse> ChatOnceAsync(LangChainChatRequest request, CancellationToken ct = default)
        {
            var baseUrl = ResolveBaseUrl();

            var resolvedModel = ResolveModel(request.Model);
            if (string.IsNullOrWhiteSpace(resolvedModel))
            {
                throw new InvalidOperationException("Model is not configured. Provide model in request or set env OPENAI_MODEL or config LLM:OpenAI:Model.");
            }

            // Decide routing: use HTTP ChatService when NOT the official OpenAI endpoint.
            // Conditions: baseUrl is not openai.com OR the model name hints at a
            // non-OpenAI provider (deepseek/siliconflow).
            bool modelImpliesNonOpenAI = resolvedModel.Contains("deepseek", StringComparison.OrdinalIgnoreCase)
                                       || resolvedModel.Contains("silicon", StringComparison.OrdinalIgnoreCase)
                                       || resolvedModel.Contains("flow", StringComparison.OrdinalIgnoreCase);
            bool isOfficialOpenAI = baseUrl.Contains("openai.com", StringComparison.OrdinalIgnoreCase);
            bool useHttpPath = !isOfficialOpenAI || modelImpliesNonOpenAI;

            _logger.LogInformation("Routing decision -> UseHttpPath: {UseHttp}, BaseUrl: {BaseUrl}, Model: {Model}", useHttpPath, baseUrl, resolvedModel);

            if (useHttpPath)
            {
                var messages = new List<ChatMessage>
                {
                    new ChatMessage { Role = "user", Content = request.Input }
                };
                return await SendHttpChatAsync(resolvedModel, request.Temperature, messages, ct);
            }

            // Default path (OpenAI official): use the LangChain provider.
            var lcModel = CreateModel(resolvedModel, request.Temperature);
            var output = await lcModel.GenerateAsync(request.Input, cancellationToken: ct);

            return new LangChainChatResponse
            {
                Content = output,
                Model = resolvedModel
            };
        }

        /// <summary>
        /// Handles a multi-turn conversation with session memory. Memory is keyed by
        /// <see cref="LangChainChatRequest.SessionId"/> and the call goes through the
        /// OpenAI-compatible HTTP path (<see cref="IChatService"/>), e.g. SiliconFlow/DeepSeek.
        /// </summary>
        /// <param name="request">
        /// Chat request carrying the <see cref="LangChainChatRequest.SessionId"/> that scopes
        /// the conversation state, the user <see cref="LangChainChatRequest.Input"/>, an optional
        /// target <see cref="LangChainChatRequest.Model"/>, and <see cref="LangChainChatRequest.Temperature"/>.
        /// </param>
        /// <param name="ct">Cancellation token for the async operation.</param>
        /// <returns>A <see cref="LangChainChatResponse"/> with the assistant reply and the model used.</returns>
        /// <exception cref="ArgumentException">When <c>SessionId</c> is null or whitespace.</exception>
        /// <remarks>
        /// Conversation history lives in a static in-memory dictionary for demo purposes only;
        /// it is not persisted. Replace with external storage (database/cache) for production
        /// or multi-instance deployments.
        /// </remarks>
        public async Task<LangChainChatResponse> ChatWithMemoryAsync(LangChainChatRequest request, CancellationToken ct = default)
        {
            if (string.IsNullOrWhiteSpace(request.SessionId))
            {
                throw new ArgumentException("SessionId is required for memory conversation.");
            }

            // Resolve baseUrl and model like ChatOnce to keep SiliconFlow routing consistent.
            var baseUrl = ResolveBaseUrl();
            var resolvedModel = ResolveModel(request.Model) ?? "deepseek-ai/DeepSeek-R1";

            _logger.LogInformation("Memory chat routing via HTTP -> BaseUrl: {BaseUrl}, Model: {Model}", baseUrl, resolvedModel);

            // Append the user turn and take a snapshot under the lock; the HTTP call
            // must happen OUTSIDE the lock (never await while holding a monitor).
            List<ChatMessage> snapshot;
            lock (_memoryGate)
            {
                if (!_memories.TryGetValue(request.SessionId, out var history))
                {
                    history = new List<ChatMessage>();
                    _memories[request.SessionId] = history;
                }

                history.Add(new ChatMessage { Role = "user", Content = request.Input });
                snapshot = new List<ChatMessage>(history);
            }

            var response = await SendHttpChatAsync(resolvedModel, request.Temperature, snapshot, ct);

            // Record the assistant turn for subsequent requests in this session.
            lock (_memoryGate)
            {
                if (_memories.TryGetValue(request.SessionId, out var history))
                {
                    history.Add(new ChatMessage { Role = "assistant", Content = response.Content });
                }
            }

            return response;
        }

        /// <summary>
        /// Asks the model for a structured customer-service answer using a fixed
        /// system prompt (category / answer / suggested action format) via the HTTP path.
        /// </summary>
        /// <param name="request">Chat request; <see cref="LangChainChatRequest.Category"/> is optional.</param>
        /// <param name="ct">Cancellation token for the async operation.</param>
        /// <returns>The structured assistant reply and the model used.</returns>
        public async Task<LangChainChatResponse> StructuredAnswerAsync(LangChainChatRequest request, CancellationToken ct = default)
        {
            var resolvedModel = ResolveModel(request.Model) ?? "deepseek-ai/DeepSeek-R1";

            // Build messages for the OpenAI-compatible API: system instruction + user query.
            var systemPrompt = "你是一个专业的客服助手，请严格按固定结构输出：问题分类/回答内容/建议操作。";
            var userPrompt = $"分类: {(string.IsNullOrWhiteSpace(request.Category) ? "通用咨询" : request.Category)}\n问题: {request.Input}\n请严格遵循格式输出。";

            var messages = new List<ChatMessage>
            {
                new ChatMessage { Role = "system", Content = systemPrompt },
                new ChatMessage { Role = "user", Content = userPrompt }
            };

            return await SendHttpChatAsync(resolvedModel, request.Temperature, messages, ct);
        }

        /// <summary>
        /// Sends one request through <see cref="IChatService"/> using the
        /// OpenAI-compatible schema and maps the reply to <see cref="LangChainChatResponse"/>.
        /// </summary>
        private async Task<LangChainChatResponse> SendHttpChatAsync(string model, double temperature, List<ChatMessage> messages, CancellationToken ct)
        {
            var httpReq = new ChatRequest
            {
                Provider = "OpenAI", // HTTP path uses the OpenAI-compatible schema
                Model = model,
                Temperature = temperature,
                Messages = messages
            };

            var httpResp = await _chatService.ChatAsync(httpReq, ct);
            return new LangChainChatResponse
            {
                Content = httpResp.Content,
                Model = httpResp.Model
            };
        }

        /// <summary>
        /// Resolves the base URL (env OPENAI_BASE_URL > config LLM:OpenAI:BaseUrl >
        /// SiliconFlow default), treating empty/whitespace values as unset, and
        /// normalizes SiliconFlow URLs to include the /v1 suffix.
        /// </summary>
        private string ResolveBaseUrl()
        {
            var baseUrl = Environment.GetEnvironmentVariable("OPENAI_BASE_URL");
            if (string.IsNullOrWhiteSpace(baseUrl)) baseUrl = _config["LLM:OpenAI:BaseUrl"];
            if (string.IsNullOrWhiteSpace(baseUrl)) baseUrl = "https://api.siliconflow.cn/v1"; // OpenAI-compatible default

            if (baseUrl.Contains("siliconflow.cn", StringComparison.OrdinalIgnoreCase)
                && !baseUrl.TrimEnd('/').EndsWith("/v1", StringComparison.OrdinalIgnoreCase))
            {
                baseUrl = baseUrl.TrimEnd('/') + "/v1";
            }

            return baseUrl;
        }

        /// <summary>
        /// Resolves the model name (request override > env OPENAI_MODEL > config
        /// LLM:OpenAI:Model), treating empty/whitespace values as unset.
        /// Returns null when nothing is configured so callers can apply their own default.
        /// </summary>
        private string? ResolveModel(string? requestModel)
        {
            if (!string.IsNullOrWhiteSpace(requestModel)) return requestModel;

            var envModel = Environment.GetEnvironmentVariable("OPENAI_MODEL");
            if (!string.IsNullOrWhiteSpace(envModel)) return envModel;

            var cfgModel = _config["LLM:OpenAI:Model"];
            return string.IsNullOrWhiteSpace(cfgModel) ? null : cfgModel;
        }

        /// <summary>
        /// Builds a LangChain OpenAI chat model from the resolved API key and model.
        /// </summary>
        /// <exception cref="InvalidOperationException">When no API key is configured.</exception>
        private OpenAiChatModel CreateModel(string? modelOverride, double temperature)
        {
            var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY");
            if (string.IsNullOrWhiteSpace(apiKey)) apiKey = _config["LLM:OpenAI:ApiKey"];
            if (string.IsNullOrWhiteSpace(apiKey))
            {
                throw new InvalidOperationException("OpenAI API key is not configured.");
            }

            // Allow a custom base URL (DeepSeek uses an OpenAI-compatible API).
            var baseUrl = ResolveBaseUrl();
            var model = ResolveModel(modelOverride) ?? "deepseek-chat";

            // Log resolved config (mask the key for safety).
            var maskedKey = apiKey.Length > 8 ? $"{apiKey[..4]}***{apiKey[^4..]}" : "***";
            _logger.LogInformation("LangChain OpenAI provider -> BaseUrl: {BaseUrl}, Model: {Model}, ApiKey: {Key}", baseUrl, model, maskedKey);

            // v0.13.1 provider lacks an endpoint arg/prop we can rely on here for DeepSeek,
            // but we keep it for the OpenAI default path.
            var provider = new OpenAiProvider(apiKey);

            return new OpenAiChatModel(provider, model)
            {
                Settings = new OpenAiChatSettings
                {
                    Temperature = (float)temperature
                }
            };
        }
    }
}
