package toolkit

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"ollama-go/config"

	"github.com/tmc/langchaingo/llms"
	"github.com/tmc/langchaingo/llms/ollama"
)

var ollamaLLM *OllamaLLM

// LLM is the common contract implemented by the Ollama and OpenAI backends.
type LLM interface {
	// SetContent appends messages to the conversation history.
	SetContent(messages ...llms.MessageContent)
	// GetContent returns the accumulated conversation history.
	GetContent() []llms.MessageContent
	// GenerateFunction drives a synchronous tool/function-calling loop:
	// toolsContent is injected as a system prompt, funcCallback executes each
	// parsed function call, and callback (optional) streams chunks of output.
	GenerateFunction(ctx context.Context, toolsContent string, funcCallback func(fun *llms.FunctionCall) (bool, string, error), callback func(ctx context.Context, chunk []byte) error, options ...llms.CallOption) (string, error)
	// GenerateCallback generates a completion, optionally streaming chunks
	// through callback, and returns the full response.
	GenerateCallback(ctx context.Context, callback func(ctx context.Context, chunk []byte) error, options ...llms.CallOption) (*llms.ContentResponse, error)
}

// GetLLM returns an LLM for the first enabled backend in the configuration
// (Ollama takes precedence over OpenAI). It returns nil when no backend is
// enabled or when the selected backend fails to initialize.
//
// The original version discarded constructor errors with `_` and returned the
// (possibly nil) concrete pointer, which produced a non-nil interface wrapping
// a typed nil — callers comparing the result against nil were misled.
func GetLLM(model string) LLM {
	if config.CFG.Llm.Ollama.Enable {
		l, err := NewOllamaLLM(model)
		if err != nil {
			log.Println("create ollama llm:", err)
			return nil // literal nil, never a typed-nil interface
		}
		return l
	}
	if config.CFG.Llm.Openai.Enable {
		l, err := NewOpenaiLLM(model)
		if err != nil {
			log.Println("create openai llm:", err)
			return nil
		}
		return l
	}
	return nil
}

// OllamaLLM wraps a langchaingo ollama client together with the conversation
// history it accumulates across calls.
// NOTE(review): content is mutated without synchronization — a single
// OllamaLLM must not be shared across goroutines without external locking.
type OllamaLLM struct {
	model   string                // model name passed to the Ollama server
	llm     *ollama.LLM           // underlying langchaingo client
	content []llms.MessageContent // running conversation history
}

/** NewOllamaLLM creates a connection to the configured Ollama server.
 *
 * If model is empty, the model from the configuration is used. On success the
 * package-level ollamaLLM reference is updated; on failure nothing is
 * published (the original code stored a half-initialized struct in the global
 * before ollama.New could fail, leaving a broken global behind on error).
 */
func NewOllamaLLM(model string) (*OllamaLLM, error) {
	cfg := config.CFG.Llm.Ollama
	if model == "" {
		model = cfg.Model
	}
	client, err := ollama.New(ollama.WithServerURL(cfg.Url), ollama.WithModel(model))
	if err != nil {
		return nil, err
	}
	l := &OllamaLLM{
		model:   model,
		llm:     client,
		content: []llms.MessageContent{},
	}
	// Publish the global only once construction fully succeeded.
	ollamaLLM = l
	return l, nil
}

// GetContent returns the accumulated conversation history.
// NOTE(review): the slice is returned without copying, so callers share the
// backing array with this struct.
func (m *OllamaLLM) GetContent() []llms.MessageContent {
	return m.content
}

/** SetContent appends the given prompt messages to the conversation history
 * used by subsequent generation calls. Despite the name, it appends rather
 * than replaces.
 */
func (m *OllamaLLM) SetContent(messages ...llms.MessageContent) {
	for _, msg := range messages {
		m.content = append(m.content, msg)
	}
}

// GetContentSize reports the number of messages currently held in the
// conversation history.
func (m *OllamaLLM) GetContentSize() int {
	return len(m.content)
}
// RemoveContent deletes the message at index from the conversation history.
// Out-of-range indexes (negative or >= len) are ignored. The original code
// only guarded against index == -1, so any other invalid index panicked with
// a slice bounds error.
func (m *OllamaLLM) RemoveContent(index int) {
	if index < 0 || index >= len(m.content) {
		return
	}
	m.content = append(m.content[:index], m.content[index+1:]...)
}

/** GenerateFunction runs the model in a synchronous function-calling loop.
 *
 * toolsContent is prepended to the history as a system prompt. Each model
 * response is probed by attempting to JSON-decode it as a FunctionCall:
 *   - if it decodes, funcCallback is invoked; its result either ends the loop
 *     (isAi false or empty result) or is fed back as a system message for
 *     another round;
 *   - if it does not decode, the raw model text is returned as the answer.
 *
 * Returns the last function result (or the raw model text) and any
 * generation error.
 *
 * NOTE(review): the bare recover() below silently swallows panics and makes
 * the method return zero values with no log — consider surfacing the panic.
 * NOTE(review): there is no iteration cap; a model that keeps emitting valid
 * function calls keeps the loop running until ctx is cancelled — presumably
 * acceptable, verify with callers.
 */
func (m *OllamaLLM) GenerateFunction(ctx context.Context, toolsContent string, funcCallback func(fun *llms.FunctionCall) (bool, string, error), callback func(ctx context.Context, chunk []byte) error, options ...llms.CallOption) (string, error) {
	defer func() {
		recover()
	}()
	// Inject the tool description ahead of the existing history.
	m.content = append([]llms.MessageContent{llms.TextParts(llms.ChatMessageTypeSystem, toolsContent)}, m.content...)
	var funcResult string
	var isAi bool
	for {
		resp, err := m.llm.GenerateContent(ctx, m.content, options...)
		if err != nil {
			log.Println("执行函数任务失败：", err.Error())
			return "", err
		}
		choice1 := resp.Choices[0]
		log.Printf("函数上下文内容：%s\n", choice1.Content)

		// Probe: does the model output parse as a function call?
		funCall := new(llms.FunctionCall)
		err = json.Unmarshal([]byte(choice1.Content), funCall)
		if err == nil {
			// Record the model's function-call turn, then execute it.
			// NOTE(review): choice1.FuncCall is passed, not the locally
			// decoded funCall — confirm which one callers expect.
			m.content = append(m.content, llms.TextParts(llms.ChatMessageTypeAI, choice1.Content))
			isAi, funcResult, err = funcCallback(choice1.FuncCall)
			if err != nil {
				// Feed the failure back to the model as a human turn and retry.
				log.Println("函数返回失败：", err.Error())
				m.content = append(m.content, llms.TextParts(llms.ChatMessageTypeHuman, err.Error()))
			} else {
				log.Println("函数返回结果：", funcResult)
				// Stop when the callback does not want another AI pass or
				// produced nothing to continue with.
				if !isAi || funcResult == "" {
					break
				}
				m.content = append(m.content, llms.TextParts(llms.ChatMessageTypeSystem, funcResult))
				// Streaming is enabled only from the second round onward.
				// NOTE(review): this appends a duplicate streaming option on
				// every iteration — harmless if the library takes the last
				// one, but worth confirming.
				if callback != nil {
					options = append(options, llms.WithStreamingFunc(callback))
				}
			}
		} else {
			// Not a function call: treat the model output as the final answer.
			log.Println(err)
			return choice1.Content, nil
		}
	}
	return funcResult, nil
}

/** GenerateCallback generates a completion from the accumulated history;
 * results are delivered asynchronously via callback when one is supplied.
 *
 * The conversation history is cleared after the call regardless of outcome.
 * Returns the full content response and any generation error.
 *
 * The original version deferred a bare recover(), which silently swallowed
 * panics and made the method return (nil, nil) — indistinguishable from
 * success. A recovered panic is now logged and surfaced as an error through
 * the named result.
 */
func (m *OllamaLLM) GenerateCallback(ctx context.Context, callback func(ctx context.Context, chunk []byte) error, options ...llms.CallOption) (resp *llms.ContentResponse, err error) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("GenerateCallback panic recovered: %v", r)
			resp = nil
			err = fmt.Errorf("generate callback panicked: %v", r)
		}
	}()
	if callback != nil {
		options = append(options, llms.WithStreamingFunc(callback))
	}
	resp, err = m.llm.GenerateContent(ctx, m.content, options...)
	// Reset the history so the next call starts a fresh conversation.
	m.content = []llms.MessageContent{}
	return resp, err
}
