package gtp

import (
	"context"
	"encoding/json"
	"errors"
	"github.com/sashabaranov/go-openai"
	"math"
	"openwechat/config"
	"openwechat/logs"
	"openwechat/utils"
	tiktokens "openwechat/utils/tokenizer"
)

// HistoryDB is the per-user conversation record persisted in the key-value
// store (written by putHistoryMessage, read by getHistoryMessage).
type HistoryDB struct {
	// StartIndex remembers the tail position recorded when the history first
	// crossed the soft token threshold; trimming in Openai slices from here.
	StartIndex  int64                          `json:"start_index"`
	// TotalTokens is the usage reported by the last completion response
	// (stored under the legacy JSON key "tokens_num").
	TotalTokens int                            `json:"tokens_num"`
	// Messages is the accumulated conversation (user and assistant turns),
	// excluding the system prompt, which is re-added on every request.
	Messages    []openai.ChatCompletionMessage `json:"messages"`
}

// Openai sends msg for the given user to the chat-completion endpoint,
// prefixed by a fixed system prompt and the user's stored history, and
// returns the model's choices. When the history exceeds the configured token
// budget it is trimmed before the request, and the updated history (including
// the assistant replies) is persisted asynchronously.
func Openai(user string, msg string) ([]openai.ChatCompletionChoice, error) {
	messages := []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleSystem, Content: "你是一个幽默风趣，温柔智慧的小助手，在回答中尽量不要体现自己是一个机器人，名字叫ChenSir！"},
	}
	historyMessage, err := getHistoryMessage(user)
	if err != nil {
		return nil, err
	}
	// Once usage reaches 30% of the token budget, remember the current tail
	// position so a later overflow can drop everything before it.
	if float64(historyMessage.TotalTokens) >= math.Ceil(float64(config.Config.MaxToken)*0.3) && historyMessage.StartIndex == 0 {
		historyMessage.StartIndex = int64(len(historyMessage.Messages) - 1)
	}
	if historyMessage.TotalTokens > config.Config.MaxToken {
		historyMessage.Messages = historyMessage.Messages[historyMessage.StartIndex:]
		historyMessage.StartIndex = 0
		total, index, num, _ := totalTokens(historyMessage)
		if total > config.Config.MaxToken {
			// Evict the single most expensive message. The previous
			// append(msgs[0:index], msgs[index:]...) was a no-op; index+1 is
			// required to actually remove the element at index.
			historyMessage.Messages = append(historyMessage.Messages[0:index], historyMessage.Messages[index+1:]...)
		}
		if total-num > config.Config.MaxToken {
			// Still over budget even without the most expensive message:
			// drop the whole history and start fresh.
			historyMessage.Messages = make([]openai.ChatCompletionMessage, 0)
		}
	}
	historyMessage.Messages = append(historyMessage.Messages, openai.ChatCompletionMessage{
		Role:    openai.ChatMessageRoleUser,
		Content: msg,
	})
	response, err := sendMessage(openai.ChatCompletionRequest{
		Model:       openai.GPT3Dot5Turbo,
		Messages:    append(messages, historyMessage.Messages...),
		MaxTokens:   config.Config.MaxToken,
		Temperature: 0.6,
		User:        user,
	})
	if err != nil {
		return nil, err
	}
	for _, choice := range response.Choices {
		historyMessage.Messages = append(historyMessage.Messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: utils.CompressAndCleanString(choice.Message.Content),
		})
	}
	historyMessage.TotalTokens = response.Usage.TotalTokens
	// Fire-and-forget persistence; errors are logged inside putHistoryMessage.
	go putHistoryMessage(user, historyMessage)
	return response.Choices, nil
}

// sendMessage performs the chat-completion request against the configured
// endpoint. If any returned choice was cut off by the token limit
// (FinishReason == "length"), the request is retried with one extra
// completion and a 1024-token larger budget until no choice is truncated.
func sendMessage(message openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
	for {
		// Rebuild the client from the current configuration on every attempt,
		// exactly as the recursive form did.
		cfg := openai.DefaultConfig(config.Config.ApiKey)
		cfg.BaseURL = config.Config.BaseUrl
		client := openai.NewClientWithConfig(cfg)
		response, err := client.CreateChatCompletion(context.Background(), message)
		if err != nil {
			return response, err
		}
		truncated := false
		for _, choice := range response.Choices {
			if choice.FinishReason == "length" {
				truncated = true
				break
			}
		}
		if !truncated {
			return response, nil
		}
		message.N += 1
		message.MaxTokens += 1024
	}
}

// getHistoryMessage loads the stored conversation history for user. A missing
// key is not an error: an empty HistoryDB is returned so the caller can start
// a fresh conversation.
func getHistoryMessage(user string) (HistoryDB, error) {
	historyMessage := HistoryDB{Messages: make([]openai.ChatCompletionMessage, 0)}
	// NOTE(review): Has followed by Get is not atomic; presumably only one
	// writer exists per user key — confirm against the callers.
	exist, err := config.DB.Has([]byte(user), nil)
	if err != nil {
		return historyMessage, err
	}
	if !exist {
		return historyMessage, nil
	}
	value, err := config.DB.Get([]byte(user), nil)
	if err != nil {
		return historyMessage, err
	}
	if err = json.Unmarshal(value, &historyMessage); err != nil {
		// Keep the underlying unmarshal error instead of discarding it.
		return historyMessage, errors.Join(errors.New("unmarshal history message error"), err)
	}
	return historyMessage, nil
}

// putHistoryMessage serializes and stores the user's conversation history.
// It is best-effort: failures are logged, never returned, because it is run
// as a fire-and-forget goroutine from Openai.
func putHistoryMessage(user string, historyMessage HistoryDB) {
	marshal, err := json.Marshal(historyMessage)
	if err != nil {
		// Include the actual error, matching the Put failure log below
		// (the original dropped it).
		logs.Error("put history message marshal error:%s", err.Error())
		return
	}
	if err := config.DB.Put([]byte(user), marshal, nil); err != nil {
		logs.Error("put history message error:%s", err.Error())
	}
}

// totalTokens estimates the prompt cost of the stored history with the
// tiktoken encoder for gpt-3.5-turbo: a fixed 4-token framing overhead per
// message plus 2 tokens priming the reply, then role/content/name tokens per
// message. It also reports the index and cost of the single most expensive
// message so callers can evict it first.
func totalTokens(historyMessage HistoryDB) (total int, maxMsgIndex int, maxTokensNum int, err error) {
	encoder, err := tiktokens.EncodingForModel(openai.GPT3Dot5Turbo)
	if err != nil {
		return
	}
	// Per-message framing overhead plus assistant-reply priming.
	total = len(historyMessage.Messages)*4 + 2
	for i, m := range historyMessage.Messages {
		cost := len(encoder.Encode(m.Role, nil, nil)) + len(encoder.Encode(m.Content, nil, nil))
		if m.Name != "" {
			// -1 mirrors the OpenAI cookbook heuristic where a present name
			// replaces the role token — TODO(review) confirm for this model.
			cost += len(encoder.Encode(m.Name, nil, nil)) - 1
		}
		total += cost
		switch {
		case maxTokensNum == 0:
			// First message seeds the maximum; index stays 0.
			maxTokensNum = cost
		case cost > maxTokensNum:
			maxMsgIndex = i
			maxTokensNum = cost
		}
	}
	return
}

// completionMessages builds the message list for a chat-completion request:
// the system prompt, the stored history for user, and finally the new user
// message.
//
// NOTE(review): the token-counting section computes a total but never acts on
// it, and the truncated content slice is discarded — it looks like unfinished
// budgeting logic; confirm the intent before extending it.
func completionMessages(user, msg string) ([]openai.ChatCompletionMessage, error) {
	messages := []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleSystem, Content: "你是一个幽默风趣的小助手"},
	}
	historyMessage, err := getHistoryMessage(user)
	if err != nil {
		return nil, err
	}
	encoder, err := tiktokens.EncodingForModel(openai.GPT3Dot5Turbo)
	if err != nil {
		return nil, err
	}
	// every message follows <im_start>{role/name}\n{content}<im_end>
	// every reply is primed with <im_start>assistant
	// Count the history itself — the original ranged over `messages`, which
	// only ever holds the system prompt. Renamed from `totalTokens`, which
	// shadowed the package-level function of the same name.
	tokenCount := len(historyMessage.Messages)*4 + 2
	for _, message := range historyMessage.Messages {
		roleNum := len(encoder.Encode(message.Role, nil, nil))
		content := encoder.Encode(message.Content, nil, nil)
		nameNum := 0
		if message.Name != "" {
			nameNum = len(encoder.Encode(message.Name, nil, nil))
			nameNum -= 1
		}
		if len(content) > 200 {
			// Count only the first 100 and last 101 tokens of long messages.
			content = append(content[0:100], content[len(content)-101:]...)
		}
		tokenCount += roleNum + len(content) + nameNum
	}
	_ = tokenCount // TODO(review): unfinished budget check; see note above.
	historyMessage.Messages = append(historyMessage.Messages, openai.ChatCompletionMessage{
		Role:    openai.ChatMessageRoleUser,
		Content: msg,
	})
	// Bug fix: include the history and the new user message in the result —
	// the original returned only the system prompt, discarding both.
	return append(messages, historyMessage.Messages...), nil
}
