package agent

import (
	"context"
	"errors"
	"math/rand"
	"net"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

	"openmanus/config"
	"openmanus/llm"

	logger "github.com/axcom/ninego/log"

	"openmanus/llm/ollama"
	"openmanus/llm/openai"
)

// ROLE_TYPE represents the role of the message sender.
type ROLE_TYPE string

// TOOL_CHOICE_TYPE represents how the model is allowed to select tools.
type TOOL_CHOICE_TYPE string

// Role and tool-choice constants. These are untyped string constants, so
// they convert implicitly to ROLE_TYPE / TOOL_CHOICE_TYPE where needed.
const (
	SYSTEM    = "system"
	USER      = "user"
	ASSISTANT = "assistant"
	TOOL      = "tool"

	ToolChoiceAuto     = "auto"
	ToolChoiceNone     = "none"
	ToolChoiceRequired = "required"
)

// Valid value sets for roles and tool choices.
var (
	RoleValues       = []ROLE_TYPE{SYSTEM, USER, ASSISTANT, TOOL}
	ToolChoiceValues = []TOOL_CHOICE_TYPE{ToolChoiceAuto, ToolChoiceNone, ToolChoiceRequired}
)

// CreateLLM creates an LLM client for the named configuration entry.
//
// Lookup order: the entry named configName, then the "default" entry, then
// any remaining entry (Go map iteration order, i.e. nondeterministic).
// Returns an error when cfg is nil or contains no LLM configurations.
func CreateLLM(cfg *config.Config, configName string) (*LLMClient, error) {
	if cfg == nil {
		logger.Error("配置为空，无法创建LLM实例")
		// Previously returned (nil, nil), which made callers dereference a
		// nil client without any error to check.
		return nil, errors.New("nil config: cannot create LLM instance")
	}

	// Resolve the configuration: the named entry first.
	llmCfg, exists := cfg.LLM[configName]
	if !exists {
		if defaultCfg, ok := cfg.LLM["default"]; ok {
			// Fall back to the "default" entry.
			llmCfg = defaultCfg
		} else if len(cfg.LLM) > 0 {
			// Otherwise pick an arbitrary entry (map order is random).
			for _, v := range cfg.LLM {
				llmCfg = v
				break
			}
		} else {
			logger.Error("没有可用的LLM配置")
			// Previously errors.New("") — an empty, useless message.
			return nil, errors.New("no LLM configuration available")
		}
	}

	// Propagate the OpenAI-style request options to the llm package.
	llm.GPT_Options = llmCfg.GPTOptions

	// Both "openai" and "ollama" are handled by NewOpenAILLM; unknown API
	// types fall back to it as well, with a warning.
	switch llmCfg.APIType {
	case "openai", "ollama":
		return NewOpenAILLM(configName, &llmCfg)
	default:
		logger.Warn("未知的API类型 %s，使用OpenAI客户端", llmCfg.APIType)
		return NewOpenAILLM(configName, &llmCfg)
	}
}

// ILLM is the language-model client interface implemented by LLMClient.
//
// NOTE(review): the methods previously returned *llm.Message, while the
// concrete LLMClient methods (Ask/AskTool below) return llm.Message — so
// LLMClient did not satisfy this interface. Aligned to llm.Message here;
// confirm no other implementer elsewhere relies on the pointer form.
type ILLM interface {
	// Ask sends the conversation to the model and returns its reply.
	Ask(messages []llm.Message, prompt string) (llm.Message, error)

	// AskTool sends the conversation plus tool definitions and returns the
	// model's reply, which may contain tool calls.
	AskTool(messages []llm.Message, prompt string, tools []llm.Tool) (llm.Message, error)
}

// LLMClient is the concrete language-model client: a provider backend plus
// the per-configuration settings it was created with.
type LLMClient struct {
	Provider   llm.Provider // backend that performs the actual model calls
	ConfigName string       // name of the configuration entry for this client

	model       string  // model identifier passed to the provider
	maxTokens   int     // NOTE(review): never assigned in this file — confirm intended use
	temperature float32 // NOTE(review): never assigned in this file — confirm intended use
	apiType     string  // raw APIType value from the configuration ("openai", "ollama", ...)
}

// Per-config singleton cache of LLM clients; all access goes through mu.
var (
	instances = make(map[string]*LLMClient)
	mu        sync.Mutex
)

// ollamaBaseURL normalizes a raw base-URL string into a *url.URL for the
// ollama provider, filling in scheme, host, and port defaults.
//
// Rules (matching the ollama CLI conventions): a missing scheme means http
// (except the bare "ollama.com", which becomes https on 443); a missing or
// invalid port falls back to 11434 (or 80/443 for explicit http/https); a
// missing host falls back to 127.0.0.1.
func ollamaBaseURL(raw string) *url.URL {
	defaultPort := "11434"

	s := strings.TrimSpace(raw)
	scheme, hostport, ok := strings.Cut(s, "://")
	switch {
	case !ok:
		scheme, hostport = "http", s
		if s == "ollama.com" {
			scheme, hostport = "https", "ollama.com:443"
		}
	case scheme == "http":
		defaultPort = "80"
	case scheme == "https":
		defaultPort = "443"
	}

	hostport, path, _ := strings.Cut(hostport, "/")
	host, port, err := net.SplitHostPort(hostport)
	if err != nil {
		// No explicit port: keep the bare host (bracket-stripped for IPv6),
		// or fall back to loopback when the host is empty.
		host, port = "127.0.0.1", defaultPort
		if ip := net.ParseIP(strings.Trim(hostport, "[]")); ip != nil {
			host = ip.String()
		} else if hostport != "" {
			host = hostport
		}
	}

	if n, err := strconv.ParseInt(port, 10, 32); err != nil || n > 65535 || n < 0 {
		logger.Warn("invalid port, using default", "port", port, "default", defaultPort)
		port = defaultPort
	}

	return &url.URL{
		Scheme: scheme,
		Host:   net.JoinHostPort(host, port),
		Path:   path,
	}
}

// NewOpenAILLM creates or returns the cached LLM client for configName
// (per-name singleton, guarded by mu). llmConfig must be non-nil.
//
// API types other than "ollama" — including unknown ones — use the
// OpenAI-compatible provider, mirroring the fallback in CreateLLM.
// Previously an unrecognized APIType left Provider nil and cached a broken
// client that would panic on first use.
func NewOpenAILLM(configName string, llmConfig *config.LLMSettings) (*LLMClient, error) {
	mu.Lock()
	defer mu.Unlock()

	if instance, exists := instances[configName]; exists {
		return instance, nil
	}

	if llmConfig == nil {
		return nil, errors.New("llm config is required")
	}

	var client llm.Provider
	switch llmConfig.APIType {
	case "ollama":
		base := ollamaBaseURL(llmConfig.BaseURL)
		logger.Printf("%#v\n", base)
		client = ollama.NewProvider(base, llmConfig.Model)
	default:
		// "openai" and any unknown API type.
		client = openai.NewProvider(llmConfig.APIKey, llmConfig.BaseURL, llmConfig.Model)
	}

	instance := &LLMClient{
		Provider:   client,
		ConfigName: configName, // previously never populated
		model:      llmConfig.Model,
		//maxTokens:   llmConfig.MaxTokens,
		//temperature: llmConfig.Temperature,
		apiType: llmConfig.APIType,
	}

	instances[configName] = instance
	return instance, nil
}

// Ask queries the language model with the given conversation and prompt,
// using no tools. It is shorthand for AskTool with an empty tool list.
func (lm *LLMClient) Ask(messages []llm.Message, prompt string) (llm.Message, error) {
	noTools := make([]llm.Tool, 0)
	return lm.AskTool(messages, prompt, noTools)
}

// AskTool sends the conversation and tool definitions to the provider and
// returns the model's reply. The call is retried up to maxAttempts times
// with exponential backoff plus random jitter, all under a 300-second
// overall deadline. Returns an error when every attempt fails or when the
// provider yields a nil response.
func (lm *LLMClient) AskTool(messages []llm.Message, prompt string, tools []llm.Tool) (llm.Message, error) {
	const (
		maxAttempts    = 6
		timeoutSeconds = 300
	)
	ctx, cancel := context.WithTimeout(context.Background(), timeoutSeconds*time.Second)
	defer cancel()

	var response llm.Message
	var err error
	for attempt := 0; attempt < maxAttempts; attempt++ {
		response, err = lm.Provider.CreateMessage(ctx, prompt, messages, tools)
		if err == nil {
			break
		}
		// Previously the format string had no verb for err, so the error
		// was emitted as a stray extra argument.
		logger.Errorf("Attempt %d failed: %v, retrying...", attempt+1, err)
		if attempt == maxAttempts-1 {
			break // no point waiting after the final attempt
		}

		// Exponential backoff with random jitter, capped at 60s.
		waitTime := time.Duration(1<<attempt) * time.Second
		jitter := time.Duration(rand.Int63n(int64(waitTime)))
		totalWait := waitTime + jitter
		if totalWait > 60*time.Second {
			totalWait = 60 * time.Second
		}
		// NOTE(review): the /10 scales the actual wait down to 0.1-6s,
		// which defeats the 60s cap above — confirm whether this is
		// deliberate tuning or a leftover; kept as-is to preserve timing.
		// Unlike the previous bare time.Sleep, the wait now aborts as soon
		// as the context deadline expires.
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(totalWait / 10):
		}
	}

	if err != nil {
		return nil, err
	}

	// Guard against providers that return (nil, nil).
	if response == nil {
		return nil, errors.New("invalid or empty response from LLM")
	}

	return response, nil
}
