package token

import (
	"fmt"
	"github.com/pkoukk/tiktoken-go"
	tiktokenloader "github.com/pkoukk/tiktoken-go-loader"
	"github.com/sashabaranov/go-openai"
	"strings"
)

// init installs the offline BPE loader so encodings are read from the
// embedded data in tiktoken-go-loader instead of being downloaded from
// the network on first use.
func init() {
	tiktoken.SetBpeLoader(tiktokenloader.NewOfflineLoader())
}

// NewToken returns a Token preset to the cl100k_base encoding, the
// encoding used by gpt-3.5-turbo and gpt-4 models.
func NewToken() *Token {
	t := &Token{}
	t.encoding = "cl100k_base" // "gpt-3.5-turbo"
	return t
}

// Token selects a tiktoken tokenizer either by raw encoding name
// (e.g. "cl100k_base") or by OpenAI model name (e.g. "gpt-4-0613").
type Token struct {
	mode     int // 0default|1model — 0: encoding holds an encoding name, 1: encoding holds a model name
	encoding string
}

// Encoding puts r into encoding mode, resolving the tokenizer by a raw
// tiktoken encoding name (e.g. "cl100k_base"). It returns r for chaining.
func (r *Token) Encoding(encoding string) *Token {
	r.mode, r.encoding = 0, encoding
	return r
}
// Model puts r into model mode, resolving the tokenizer by an OpenAI
// model name (e.g. "gpt-4-0613"). It returns r for chaining.
func (r *Token) Model(model string) *Token {
	r.mode, r.encoding = 1, model
	return r
}

// Token resolves the configured name into a tokenizer: by model name
// when in model mode, otherwise by raw encoding name.
func (r *Token) Token() (*tiktoken.Tiktoken, error) {
	if r.mode == 1 {
		return tiktoken.EncodingForModel(r.encoding)
	}
	// Early return above; no else needed after a terminating branch.
	return tiktoken.GetEncoding(r.encoding)
}
// Encode tokenizes content with the configured tokenizer and returns
// the token IDs, or an error if the tokenizer cannot be resolved.
func (r *Token) Encode(content string) ([]int, error) {
	enc, err := r.Token()
	if err != nil {
		return nil, err
	}
	ids := enc.Encode(content, nil, nil)
	return ids, nil
}
// NumTokens estimates the prompt token count for a chat completion
// request, following the per-model accounting rules from OpenAI's
// cookbook (tokens per message, tokens per name, plus the 3-token
// assistant reply priming).
//
// Unversioned model names (e.g. "gpt-3.5-turbo") fall back to the
// latest known snapshot. The fallback runs on a copy of the receiver
// so that counting never mutates r's configured mode/encoding —
// previously r.Model(...) rewrote the receiver as a side effect.
func (r *Token) NumTokens(messages []openai.ChatCompletionMessage) (numTokens int, err error) {
	token, err := r.Token()
	if err != nil {
		return numTokens, err
	}

	var tokensPerMessage, tokensPerName int
	switch r.encoding {
	case "gpt-3.5-turbo-0613",
		"gpt-3.5-turbo-16k-0613",
		"gpt-4-0314",
		"gpt-4-32k-0314",
		"gpt-4-0613",
		"gpt-4-32k-0613":
		tokensPerMessage = 3
		tokensPerName = 1
	case "gpt-3.5-turbo-0301":
		tokensPerMessage = 4 // <|start|>{role/name}\n{content}<|end|>\n
		// When a name is present it replaces the role, saving one token.
		tokensPerName = -1
	default:
		switch {
		case strings.Contains(r.encoding, "gpt-3.5-turbo"):
			// Retry on a copy pinned to the latest snapshot.
			cp := *r
			return cp.Model("gpt-3.5-turbo-0613").NumTokens(messages)
		case strings.Contains(r.encoding, "gpt-4"):
			cp := *r
			return cp.Model("gpt-4-0613").NumTokens(messages)
		default:
			return numTokens, fmt.Errorf("not implemented for model %s. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens", r.encoding)
		}
	}

	for _, message := range messages {
		numTokens += tokensPerMessage
		numTokens += len(token.Encode(message.Content, nil, nil))
		numTokens += len(token.Encode(message.Role, nil, nil))
		// An empty Name encodes to zero tokens, so counting it
		// unconditionally is safe.
		numTokens += len(token.Encode(message.Name, nil, nil))
		if message.Name != "" {
			numTokens += tokensPerName
		}
	}
	numTokens += 3 // <|start|>assistant<|message|>

	return numTokens, nil
}

// Encodings
// o200k_base	gpt-4o
// cl100k_base	gpt-4, gpt-3.5-turbo, text-embedding-ada-002, text-embedding-3-small, text-embedding-3-large
// p50k_base	Codex models, text-davinci-002, text-davinci-003
// r50k_base (or gpt2)	GPT-3 models like davinci

// Models
// gpt-4o-*	o200k_base
// gpt-4-*	cl100k_base
// gpt-3.5-turbo-*	cl100k_base
// gpt-4o	o200k_base
// gpt-4	cl100k_base
// gpt-3.5-turbo	cl100k_base
// text-davinci-003	p50k_base
// text-davinci-002	p50k_base
// text-davinci-001	r50k_base
// text-curie-001	r50k_base
// text-babbage-001	r50k_base
// text-ada-001	r50k_base
// davinci	r50k_base
// curie	r50k_base
// babbage	r50k_base
// ada	r50k_base
// code-davinci-002	p50k_base
// code-davinci-001	p50k_base
// code-cushman-002	p50k_base
// code-cushman-001	p50k_base
// davinci-codex	p50k_base
// cushman-codex	p50k_base
// text-davinci-edit-001	p50k_edit
// code-davinci-edit-001	p50k_edit
// text-embedding-ada-002	cl100k_base
// text-embedding-3-small	cl100k_base
// text-embedding-3-large	cl100k_base
// text-similarity-davinci-001	r50k_base
// text-similarity-curie-001	r50k_base
// text-similarity-babbage-001	r50k_base
// text-similarity-ada-001	r50k_base
// text-search-davinci-doc-001	r50k_base
// text-search-curie-doc-001	r50k_base
// text-search-babbage-doc-001	r50k_base
// text-search-ada-doc-001	r50k_base
// code-search-babbage-code-001	r50k_base
// code-search-ada-code-001	r50k_base
// gpt2	gpt2
