package agent

import (
	"context"
	"encoding/json"
	"fmt"
	"sort"
	"strings"
	"sync/atomic"
	"time"

	"github.com/google/uuid"
	"github.com/rs/zerolog/log"

	"techoiceness.com/aiagent/llm-gateway/internal/entity"
	"techoiceness.com/aiagent/llm-gateway/internal/llm"
	"techoiceness.com/aiagent/llm-gateway/internal/llm/types"
	"techoiceness.com/aiagent/llm-gateway/internal/repository"
	"techoiceness.com/aiagent/llm-gateway/internal/server/sse"
	"techoiceness.com/aiagent/llm-gateway/internal/tools"
)

// AgentConfig holds the tunable parameters that control how an Agent
// talks to the LLM provider.
type AgentConfig struct {
	MaxToolCallDepth int           // maximum recursion depth for chained tool calls
	RequestTimeout   time.Duration // timeout for a single non-streaming LLM request
	StreamTimeout    time.Duration // per-message send timeout on streaming channels
}

// DefaultAgentConfig returns the configuration used when callers do not
// supply their own: at most 5 nested tool-call rounds, a 30-second
// request timeout, and a 1-second stream send timeout.
func DefaultAgentConfig() *AgentConfig {
	cfg := AgentConfig{
		MaxToolCallDepth: 5,
		RequestTimeout:   30 * time.Second,
		StreamTimeout:    time.Second,
	}
	return &cfg
}

// Agent drives a single conversation: it caches the message history in
// memory, persists every message through llmMessageRepo, and talks to
// the LLM via llmProviderManager, optionally executing tool calls.
//
// NOTE(review): messages and dbMessagesLoaded are mutated from the
// background goroutine in ChatStream (via processStreamResponse →
// addMessage) without synchronization — confirm each Agent is only
// ever driven by one request at a time.
type Agent struct {
	// conversation this agent belongs to
	conversationID uuid.UUID
	// selects an available LLM provider/model
	llmProviderManager *llm.ProviderManager
	// persistence for conversation messages
	llmMessageRepo repository.LLMMessageRepository
	// in-memory cache of the conversation history
	messages []types.Message
	// whether messages has been populated from the database
	dbMessagesLoaded bool
	// optional tool registry; nil disables tool calling
	toolManager *tools.ToolManager
	// last activity, unix seconds; accessed atomically
	lastActiveTime int64
	// system prompt injected as the first message of a new conversation
	systemPrompt string
	// behavioral knobs; see AgentConfig
	config *AgentConfig
}

// NewAgent constructs an Agent for the given conversation using
// DefaultAgentConfig. The repository is used to lazily load and to
// persist the conversation's messages.
func NewAgent(
	conversationID uuid.UUID,
	llmProviderManager *llm.ProviderManager,
	repo repository.LLMMessageRepository,
) *Agent {
	// Delegate so construction logic lives in exactly one place.
	return NewAgentWithConfig(conversationID, llmProviderManager, repo, DefaultAgentConfig())
}

// NewAgentWithConfig constructs an Agent with an explicit configuration.
// A nil config falls back to DefaultAgentConfig so that later
// dereferences of agent.config (e.g. a.config.RequestTimeout in request)
// cannot panic.
func NewAgentWithConfig(
	conversationID uuid.UUID,
	llmProviderManager *llm.ProviderManager,
	llmMessageRepo repository.LLMMessageRepository,
	config *AgentConfig,
) *Agent {
	if config == nil {
		config = DefaultAgentConfig()
	}
	agent := &Agent{
		conversationID:     conversationID,
		llmProviderManager: llmProviderManager,
		llmMessageRepo:     llmMessageRepo,
		config:             config,
	}
	atomic.StoreInt64(&agent.lastActiveTime, time.Now().Unix())
	return agent
}

// SetSystemPrompt sets the system prompt that is injected as the first
// message of a brand-new conversation.
func (a *Agent) SetSystemPrompt(prompt string) {
	a.systemPrompt = prompt
}

// GetSystemPrompt returns the currently configured system prompt.
func (a *Agent) GetSystemPrompt() string {
	return a.systemPrompt
}

// SetToolManager installs the tool registry used for tool calling;
// a nil manager disables tool calls entirely.
func (a *Agent) SetToolManager(toolManager *tools.ToolManager) {
	a.toolManager = toolManager
}

// loadMessagesIfNeeded lazily populates the in-memory history from the
// database on first use. When the conversation is empty and a system
// prompt is configured, the prompt is persisted as the first message.
func (a *Agent) loadMessagesIfNeeded() error {
	if a.dbMessagesLoaded {
		return nil
	}

	ctx := context.Background()
	stored, err := a.llmMessageRepo.GetMessagesByConversation(ctx, a.conversationID)
	if err != nil {
		return fmt.Errorf("failed to load messages from database: %w", err)
	}

	history := make([]types.Message, len(stored))
	for i := range stored {
		history[i] = stored[i].ToProviderMessage()
	}
	a.messages = history

	// Seed a fresh conversation with the configured system prompt.
	if len(a.messages) == 0 && a.systemPrompt != "" {
		sysMsg := types.Message{
			Role:    types.RoleSystem,
			Content: a.systemPrompt,
		}
		if err := a.addMessage(sysMsg, nil, nil); err != nil {
			return fmt.Errorf("failed to add system message: %w", err)
		}
	}

	a.dbMessagesLoaded = true
	return nil
}

// addMessage appends message to the in-memory cache and persists it to
// the database. metadata (optional) carries LLM bookkeeping such as
// token usage and raw bodies; conversationRoundID (optional) links the
// message to a dialogue round. On any failure the in-memory append is
// rolled back.
func (a *Agent) addMessage(message types.Message, metadata *entity.LLMMessageMetadata, conversationRoundID *uuid.UUID) error {
	// Optimistically append to the cache; undone via rollback on failure.
	a.messages = append(a.messages, message)
	rollback := func() {
		a.messages = a.messages[:len(a.messages)-1]
	}

	record := &entity.LLMMessage{
		ConversationID:   a.conversationID,
		Role:             message.Role,
		Content:          message.Content,
		Name:             message.Name,
		ReasoningContent: message.ReasoningContent,
		ToolCallID:       message.ToolCallID,
	}

	if conversationRoundID != nil {
		record.ConversationRoundID = *conversationRoundID
	}

	// Tool calls are stored as a single JSON blob.
	if len(message.ToolCalls) > 0 {
		encoded, err := json.Marshal(message.ToolCalls)
		if err != nil {
			rollback()
			return fmt.Errorf("failed to marshal tool calls: %w", err)
		}
		record.ToolCalls = encoded
	}

	if metadata != nil {
		record.FinishReason = metadata.FinishReason
		record.Model = metadata.Model
		record.PromptTokens = metadata.PromptTokens
		record.CompletionTokens = metadata.CompletionTokens
		record.TotalTokens = metadata.TotalTokens
		record.ProcessingTime = metadata.ProcessingTime
		record.RawRequestBody = metadata.RawRequestBody   // raw request body, stored verbatim
		record.RawResponseBody = metadata.RawResponseBody // raw response body, stored verbatim
	}

	if err := a.llmMessageRepo.SaveMessage(context.Background(), record); err != nil {
		rollback()
		return fmt.Errorf("failed to save message to database: %w", err)
	}

	return nil
}

// Chat runs one blocking request/response round: it persists the user
// input, calls the LLM (recursing through tool calls inside request),
// and converts the final LLM response into an SSE ChatResponse.
// Request-level failures are returned as an SSE error payload with a
// nil Go error.
func (a *Agent) Chat(userInput string, conversationRoundID uuid.UUID) (*sse.ChatResponse, error) {
	atomic.StoreInt64(&a.lastActiveTime, time.Now().Unix())

	if err := a.loadMessagesIfNeeded(); err != nil {
		log.Error().Str("process", "chat").Str("conversation_id", a.conversationID.String()).Err(err).Msg("加载消息失败")
		return nil, err
	}

	// Persist the user's input, linked to this dialogue round.
	if err := a.addMessage(types.Message{
		Role:    types.RoleUser,
		Content: userInput,
	}, nil, &conversationRoundID); err != nil {
		return nil, err
	}

	modelInfo := a.llmProviderManager.GetAvailableModel()
	if modelInfo == nil {
		return nil, fmt.Errorf("无有效模型")
	}

	req := types.Request{
		Model:    modelInfo.Name,
		Messages: a.messages,
	}

	// Advertise tools when a manager is installed; "auto" lets the
	// model decide whether to call them.
	if a.toolManager != nil {
		if defs := a.toolManager.GetToolDefinitions(); len(defs) > 0 {
			req.Tools = defs
			req.ToolChoice = "auto"
		}
	}

	resp, err := a.request(req, 0, conversationRoundID)
	if err != nil {
		return sse.NewChatResponse(a.conversationID, sse.StatusError).
			WithError("REQUEST_ERROR", "请求失败", err.Error(), true), nil
	}

	return LLMResponseToChatResponse(resp, a.conversationID)
}

// request performs one non-streaming LLM call, persists the assistant
// reply (with token/timing metadata), and — when the model finishes
// with "tool_calls" — executes each requested tool and recurses with
// the results, up to config.MaxToolCallDepth levels deep.
//
// FIX: an empty Choices slice previously returned (nil, nil); callers
// (Chat → LLMResponseToChatResponse) treat a nil error as success and
// would dereference the nil response, so this now returns an error.
func (a *Agent) request(req types.Request, depth int, conversationRoundID uuid.UUID) (*types.Response, error) {
	availableProvider := a.llmProviderManager.GetAvailableProvider()
	if availableProvider == nil {
		return nil, fmt.Errorf("无有效模型")
	}

	startTime := time.Now()
	ctx, cancel := context.WithTimeout(context.Background(), a.config.RequestTimeout)
	defer cancel()
	resp, err := availableProvider.Chat(ctx, req)
	if err != nil {
		return nil, err
	}
	processingTime := time.Since(startTime).Milliseconds()

	if len(resp.Choices) == 0 {
		// Never return (nil, nil): a nil error signals a usable response.
		return nil, fmt.Errorf("llm response contained no choices")
	}

	choice := resp.Choices[0]

	// Bookkeeping persisted alongside the assistant message; raw
	// request/response bodies are stored verbatim.
	metadata := &entity.LLMMessageMetadata{
		FinishReason:     choice.FinishReason,
		Model:            resp.Model,
		PromptTokens:     resp.Usage.PromptTokens,
		CompletionTokens: resp.Usage.CompletionTokens,
		TotalTokens:      resp.Usage.TotalTokens,
		ProcessingTime:   processingTime,
		RawRequestBody:   resp.RawRequestBody,
		RawResponseBody:  resp.RawResponseBody,
	}

	if err := a.addMessage(choice.Message, metadata, &conversationRoundID); err != nil {
		return nil, fmt.Errorf("failed to save assistant message: %w", err)
	}

	if choice.FinishReason == "tool_calls" {
		// Bound the tool-call recursion.
		if depth >= a.config.MaxToolCallDepth {
			return nil, fmt.Errorf("tool call depth exceeded maximum limit of %d", a.config.MaxToolCallDepth)
		}

		if a.toolManager != nil {
			toolCallMessages := make([]types.Message, 0, len(choice.Message.ToolCalls))
			for _, toolCall := range choice.Message.ToolCalls {
				var result string
				if _, ok := a.toolManager.GetTool(toolCall.Function.Name); !ok {
					result = fmt.Sprintf("Error: Tool '%s' not found in registry", toolCall.Function.Name)
				} else {
					var execErr error
					result, execErr = a.toolManager.ExecuteTool(toolCall.Function.Name, toolCall.Function.Arguments)
					if execErr != nil {
						log.Error().
							Str("process", "tool_call").
							Str("conversation_id", a.conversationID.String()).
							Str("tool_name", toolCall.Function.Name).
							Err(execErr).
							Msg("工具执行失败")
						result = fmt.Sprintf("Error: Tool execution failed - %s", execErr.Error())
					}
				}

				// Record the result even on failure so the model can react.
				toolCallMessages = append(toolCallMessages, types.Message{
					Role:       types.RoleTool,
					ToolCallID: toolCall.ID,
					Content:    result,
				})
			}

			// Persist each tool result, linked to this dialogue round.
			for _, msg := range toolCallMessages {
				if err := a.addMessage(msg, nil, &conversationRoundID); err != nil {
					return nil, fmt.Errorf("failed to add tool call message: %w", err)
				}
			}

			availableModelInfo := a.llmProviderManager.GetAvailableModel()
			if availableModelInfo == nil {
				return nil, fmt.Errorf("无有效模型")
			}

			toolCallRequest := types.Request{
				Model:    availableModelInfo.Name,
				Messages: a.messages,
				Stream:   false,
			}

			// Re-advertise tools; "auto" lets the model decide whether to
			// call again. (Local renamed from `tools`, which shadowed the
			// imported tools package.)
			if defs := a.toolManager.GetToolDefinitions(); len(defs) > 0 {
				toolCallRequest.Tools = defs
				toolCallRequest.ToolChoice = "auto"
			}

			return a.request(toolCallRequest, depth+1, conversationRoundID)
		}
	}

	return resp, nil
}

// ChatStreamResult is the handle returned by ChatStream: MessageChan
// delivers SSE responses until processing completes (the channel is
// closed by the producer goroutine), and Cancel aborts the background
// processing context.
type ChatStreamResult struct {
	MessageChan <-chan *sse.ChatResponse
	Cancel      context.CancelFunc
}

// ChatStream processes user input as a streaming chat round.
//
// clientCtx: client connection context, intended for monitoring the
// client's state. NOTE(review): clientCtx is never referenced in this
// body — confirm whether a client disconnect should cancel processing.
// userInput: the user's message.
// conversationRoundID: dialogue-round ID linking all LLM messages
// produced by this round.
//
// Processing runs on an independent background context so the AI round
// can complete even if the client disconnects; the returned Cancel
// aborts it early.
func (a *Agent) ChatStream(clientCtx context.Context, userInput string, conversationRoundID uuid.UUID) (*ChatStreamResult, error) {
	atomic.StoreInt64(&a.lastActiveTime, time.Now().Unix())

	if err := a.loadMessagesIfNeeded(); err != nil {
		return nil, err
	}

	userMessage := types.Message{
		Role:    types.RoleUser,
		Content: userInput,
	}

	// Persist the user message, linked to this dialogue round.
	if err := a.addMessage(userMessage, nil, &conversationRoundID); err != nil {
		return nil, err
	}

	availableModelInfo := a.llmProviderManager.GetAvailableModel()
	if availableModelInfo == nil {
		return nil, fmt.Errorf("无有效模型")
	}

	// IncludeUsage asks the provider to report token usage in the
	// stream's final chunk.
	req := types.Request{
		Model:    availableModelInfo.Name,
		Messages: a.messages,
		Stream:   true,
		StreamOptions: &types.StreamOptions{
			IncludeUsage: true,
		},
	}

	if a.toolManager != nil {
		tools := a.toolManager.GetToolDefinitions()
		if len(tools) > 0 {
			req.Tools = tools
			req.ToolChoice = "auto" // let the model decide whether to call tools
		}
	}

	// Independent processing context so the AI round finishes even if the
	// client goes away. NOTE(review): the 5-minute budget is hard-coded
	// rather than taken from a.config — confirm that is intentional.
	processCtx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)

	llmResponseChan, err := a.requestStream(processCtx, req, conversationRoundID)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("failed to start chat stream: %w", err)
	}

	messageChan := make(chan *sse.ChatResponse, 10) // buffered to avoid blocking the producer

	go func() {
		defer close(messageChan)
		defer cancel()

		for {
			select {
			case <-processCtx.Done():
				// Processing context ended (timeout/cancel): best-effort
				// deliver an error event, then exit.
				errorResp := sse.NewChatResponse(a.conversationID, sse.StatusError).
					WithError("PROCESS_TIMEOUT", "处理超时", processCtx.Err().Error(), false)
				select {
				case messageChan <- errorResp:
				case <-time.After(a.config.StreamTimeout):
				}
				return
			case streamResp, ok := <-llmResponseChan:
				if !ok {
					// llmResponseChan closed: AI processing finished.
					return
				}

				message, err := LLMStreamResponseToChatResponse(&streamResp, a.conversationID)
				if err != nil {
					log.Error().Str("process", "chat_stream").Str("conversation_id", a.conversationID.String()).
						Err(err).
						Msg("流响应转换为消息失败")
					continue
				}

				// Best-effort send: a slow or disconnected client must not
				// stall the AI processing behind this channel.
				select {
				case messageChan <- message:
				case <-time.After(a.config.StreamTimeout):
					// Client may have disconnected; processing continues.
					log.Warn().Str("process", "chat_stream").Str("conversation_id", a.conversationID.String()).
						Msg("客户端可能已断开连接，但AI处理继续进行")
				}
			}
		}
	}()

	return &ChatStreamResult{
		MessageChan: messageChan,
		Cancel:      cancel,
	}, nil
}

// ToolCallState accumulates one tool call that arrives fragmented across
// stream chunks: ID, Type and Function.Name are overwritten by the
// latest non-empty fragment, while Arguments concatenates every fragment
// of the streamed JSON argument string.
type ToolCallState struct {
	ID        string
	Type      string
	Function  types.StreamChoiceToolCallFunction
	Arguments strings.Builder
}

// requestStream starts a streaming LLM call and returns a channel of
// stream chunks. A background goroutine forwards and aggregates the
// provider stream (including recursive tool-call rounds) and closes the
// returned channel when processing finishes.
func (a *Agent) requestStream(ctx context.Context, req types.Request, conversationRoundID uuid.UUID) (<-chan types.StreamResponse, error) {
	provider := a.llmProviderManager.GetAvailableProvider()
	if provider == nil {
		return nil, fmt.Errorf("无有效模型")
	}
	if a.llmProviderManager.GetAvailableModel() == nil {
		return nil, fmt.Errorf("无有效模型")
	}

	source, err := provider.ChatStream(ctx, req)
	if err != nil {
		return nil, err
	}

	out := make(chan types.StreamResponse)
	go func() {
		defer close(out)
		a.processStreamResponse(ctx, source, out, 0, conversationRoundID, req)
	}()
	return out, nil
}

// processStreamResponse consumes one provider stream and drives one
// round of the streaming conversation:
//
//  1. forwards every data chunk to the exposed channel (dropping the
//     provider's final raw-body bookkeeping frame),
//  2. accumulates the assistant message — content, reasoning content and
//     fragmented tool calls — plus model/usage metadata,
//  3. persists the assembled assistant message, and
//  4. when the model finished with "tool_calls", executes each tool,
//     persists the results, and recurses on a fresh stream until the
//     model stops calling tools or MaxToolCallDepth is reached.
//
// It never closes exposed; the caller owns that channel.
func (a *Agent) processStreamResponse(ctx context.Context, source <-chan types.StreamResponse, exposed chan types.StreamResponse, depth int, conversationRoundID uuid.UUID, originalRequest types.Request) {
	var finalContent strings.Builder
	var finalReasoningContent strings.Builder
	toolCallStates := make(map[int]*ToolCallState)
	var finalFinishReason string
	var finalModel string
	var finalUsage *types.Usage // usage info, if the stream's final packet carries it
	var rawRequestBody string   // provider's raw request body
	var rawResponseBody string  // provider's raw SSE response body
	startTime := time.Now()

collect:
	for {
		select {
		case <-ctx.Done():
			// Context cancelled; abandon this round.
			return
		case resp, ok := <-source:
			if !ok {
				// Source closed: the stream is complete.
				break collect
			}

			// A frame carrying raw bodies is the provider's final
			// bookkeeping response; record it without forwarding it.
			if resp.RawRequestBody != "" || resp.RawResponseBody != "" {
				rawRequestBody = resp.RawRequestBody
				rawResponseBody = resp.RawResponseBody
				break collect
			}

			select {
			case exposed <- resp:
			case <-ctx.Done():
				return
			case <-time.After(a.config.StreamTimeout):
				// Could not hand off in time — the consumer has likely
				// exited; stop rather than block.
				return
			}

			if resp.Error != nil {
				log.Error().
					Str("process", "stream_response").
					Str("conversation_id", a.conversationID.String()).
					Err(resp.Error).
					Msg("流式响应错误")
				return
			}

			if len(resp.Choices) == 0 {
				continue // skip frames with no choices
			}

			choice := resp.Choices[0]
			delta := choice.Delta
			finalContent.WriteString(delta.Content)
			finalReasoningContent.WriteString(delta.ReasoningContent)
			// Tool calls arrive fragmented, keyed by chunk index; merge
			// each fragment into its accumulator.
			for _, chunk := range delta.ToolCalls {
				state, exists := toolCallStates[chunk.Index]
				if !exists {
					state = &ToolCallState{ID: chunk.ID, Type: chunk.Type}
					toolCallStates[chunk.Index] = state
				}
				if chunk.ID != "" {
					state.ID = chunk.ID
				}
				if chunk.Type != "" {
					state.Type = chunk.Type
				}
				if chunk.Function.Name != "" {
					state.Function.Name = chunk.Function.Name
				}
				if chunk.Function.Arguments != "" {
					state.Arguments.WriteString(chunk.Function.Arguments)
				}
			}
			if choice.FinishReason != "" {
				finalFinishReason = choice.FinishReason
			}
			if resp.Model != "" {
				finalModel = resp.Model
			}
			if resp.Usage != nil {
				finalUsage = resp.Usage
			}
		}
	}

	assistantResponse := types.Message{
		Role:             types.RoleAssistant,
		Content:          finalContent.String(),
		ReasoningContent: finalReasoningContent.String(),
	}
	if len(toolCallStates) > 0 {
		// FIX: the previous code did ToolCalls[i] = ... while ranging over
		// the map, using the provider's chunk index as a slice position.
		// Sparse or non-zero-based indices panic (index out of range), and
		// map range order is random. Sort the indices and append instead.
		indices := make([]int, 0, len(toolCallStates))
		for idx := range toolCallStates {
			indices = append(indices, idx)
		}
		sort.Ints(indices)
		assistantResponse.ToolCalls = make([]types.ToolCall, 0, len(indices))
		for _, idx := range indices {
			state := toolCallStates[idx]
			assistantResponse.ToolCalls = append(assistantResponse.ToolCalls, types.ToolCall{
				ID:   state.ID,
				Type: state.Type,
				Function: types.ToolCallFunction{
					Name:      state.Function.Name,
					Arguments: state.Arguments.String(),
				},
			})
		}
	}

	processingTime := time.Since(startTime).Milliseconds()

	metadata := &entity.LLMMessageMetadata{
		FinishReason:    finalFinishReason,
		Model:           finalModel,
		ProcessingTime:  processingTime,
		RawRequestBody:  rawRequestBody,  // raw request body from the provider
		RawResponseBody: rawResponseBody, // raw SSE response body from the provider
	}
	// Fill in real token usage when the stream reported it.
	if finalUsage != nil {
		metadata.PromptTokens = finalUsage.PromptTokens
		metadata.CompletionTokens = finalUsage.CompletionTokens
		metadata.TotalTokens = finalUsage.TotalTokens
	}

	// Persist the assembled assistant message with metadata, linked to
	// this dialogue round. A save failure is logged but does not abort
	// tool handling.
	if err := a.addMessage(assistantResponse, metadata, &conversationRoundID); err != nil {
		log.Error().
			Str("process", "save_response").
			Str("conversation_id", a.conversationID.String()).
			Err(err).
			Msg("保存助手响应失败")
	}

	if finalFinishReason != "tool_calls" {
		return
	}

	// Bound the tool-call recursion.
	if depth >= a.config.MaxToolCallDepth {
		log.Error().
			Str("process", "tool_call").
			Str("conversation_id", a.conversationID.String()).
			Int("depth", depth).
			Int("max_depth", a.config.MaxToolCallDepth).
			Msg("工具调用深度超过最大限制")
		return
	}

	var toolCallMessages []types.Message
	for _, toolCall := range assistantResponse.ToolCalls {
		if a.toolManager == nil {
			continue
		}
		var result string
		if _, ok := a.toolManager.GetTool(toolCall.Function.Name); !ok {
			log.Error().
				Str("process", "tool_call").
				Str("conversation_id", a.conversationID.String()).
				Msg("工具在注册表中未找到")
			result = fmt.Sprintf("Error: Tool '%s' not found in registry", toolCall.Function.Name)
		} else {
			var err error
			result, err = a.toolManager.ExecuteTool(toolCall.Function.Name, toolCall.Function.Arguments)
			if err != nil {
				log.Error().
					Str("process", "tool_call").
					Str("conversation_id", a.conversationID.String()).
					Str("tool_name", toolCall.Function.Name).
					Err(err).
					Msg("工具执行失败")
				result = fmt.Sprintf("Error: Tool execution failed - %s", err.Error())
			} else {
				log.Info().
					Str("process", "tool_call").
					Str("conversation_id", a.conversationID.String()).
					Str("tool_name", toolCall.Function.Name).
					Int("result_length", len(result)).
					Msg("工具执行成功")
			}
		}

		// Record the result even when the tool failed so the model can react.
		toolCallMessages = append(toolCallMessages, types.Message{
			Role:       types.RoleTool,
			ToolCallID: toolCall.ID,
			Content:    result,
		})
	}

	// Persist each tool result, linked to this dialogue round.
	for _, msg := range toolCallMessages {
		if err := a.addMessage(msg, nil, &conversationRoundID); err != nil {
			log.Error().
				Str("process", "save_response").
				Str("conversation_id", a.conversationID.String()).
				Err(err).
				Msg("保存工具调用消息失败")
		}
	}

	availableModelInfo := a.llmProviderManager.GetAvailableModel()
	if availableModelInfo == nil {
		log.Error().
			Str("process", "save_response").
			Str("conversation_id", a.conversationID.String()).
			Msg("没有可用的模型")
		return
	}

	newLLMRequest := types.Request{
		Model:    availableModelInfo.Name,
		Messages: a.messages,
		Stream:   true,
	}

	availableProvider := a.llmProviderManager.GetAvailableProvider()
	if availableProvider == nil {
		log.Error().
			Str("process", "save_response").
			Str("conversation_id", a.conversationID.String()).
			Msg("没有可用的提供者")
		return
	}

	newSourceStream, err := availableProvider.ChatStream(ctx, newLLMRequest)
	if err != nil {
		log.Error().
			Str("process", "save_response").
			Str("conversation_id", a.conversationID.String()).
			Err(err).
			Msg("创建工具调用流请求失败")
		return
	}
	a.processStreamResponse(ctx, newSourceStream, exposed, depth+1, conversationRoundID, originalRequest)
}

// GetMessages returns the in-memory conversation history.
//
// NOTE(review): this returns the internal slice without copying, so
// callers can alias and mutate the agent's cached history — confirm
// callers treat the result as read-only.
func (a *Agent) GetMessages() []types.Message {
	return a.messages
}

// GetMessageCount returns the number of messages currently cached in
// memory; it does not trigger a database load.
func (a *Agent) GetMessageCount() int {
	return len(a.messages)
}

// HasMessages reports whether this conversation has any messages,
// consulting the in-memory cache first and falling back to a lazy
// database load. A load error is treated as "no messages".
func (a *Agent) HasMessages() bool {
	if len(a.messages) > 0 {
		return true
	}
	// Already loaded and still empty: nothing more to check.
	if a.dbMessagesLoaded {
		return false
	}
	if err := a.loadMessagesIfNeeded(); err != nil {
		return false
	}
	return len(a.messages) > 0
}

// ClearMessagesCache drops the in-memory message cache without touching
// the database; the next access reloads from the database.
func (a *Agent) ClearMessagesCache() {
	a.messages = []types.Message{}
	a.dbMessagesLoaded = false
}

// ReloadMessages discards the in-memory cache and re-reads the
// conversation history from the database.
func (a *Agent) ReloadMessages() error {
	a.ClearMessagesCache()
	return a.loadMessagesIfNeeded()
}

// GetConversationHistory returns the full message history of this
// conversation, loading it from the database on first use.
func (a *Agent) GetConversationHistory() ([]types.Message, error) {
	err := a.loadMessagesIfNeeded()
	if err != nil {
		return nil, fmt.Errorf("failed to load messages: %w", err)
	}
	return a.messages, nil
}

// GetLatestAssistantMessage returns a pointer to the most recent
// assistant message, loading history from the database if needed.
// It returns an error when the conversation has no assistant reply yet.
func (a *Agent) GetLatestAssistantMessage() (*types.Message, error) {
	if err := a.loadMessagesIfNeeded(); err != nil {
		return nil, fmt.Errorf("failed to load messages: %w", err)
	}

	// Walk backwards so the newest assistant reply wins.
	for idx := len(a.messages) - 1; idx >= 0; idx-- {
		msg := &a.messages[idx]
		if msg.Role == types.RoleAssistant {
			return msg, nil
		}
	}

	return nil, fmt.Errorf("no assistant message found")
}
