package azure

import (
	"context"
	"errors"
	"github.com/gogf/gf/v2/container/gmap"
	"github.com/gogf/gf/v2/frame/g"
	"github.com/gogf/gf/v2/text/gstr"
	"github.com/gogf/gf/v2/util/gconv"
	"github.com/google/uuid"
	"github.com/pkoukk/tiktoken-go"
	gogpt "github.com/sashabaranov/go-openai"
	"io"
	"math"
	"os"
	"strings"
	"time"
	"wxgf/library/helper"
)

// OpenAI is the package-level singleton for the Azure OpenAI service wrapper.
var OpenAI = &openaiService{}

// openaiService wraps an Azure OpenAI chat-completion client together with its
// lazily-initialized configuration, a per-model price table, and a cached
// tiktoken encoder for local token counting.
type openaiService struct {
	client                   *gogpt.Client
	cfg                      *openAICompletionConfig
	// credits maps model name -> price per 1K tokens (populated in init,
	// consumed by getCredits).
	credits                  *gmap.StrAnyMap
	// tlsHandshakeTimeoutError is an error-message substring used to decide
	// whether a failed completion call is retryable.
	// NOTE(review): it is never assigned anywhere in this file — confirm it is
	// set elsewhere; while empty, strings.Contains(err, "") matches EVERY
	// error, so all failures are treated as retryable.
	tlsHandshakeTimeoutError string
	// tt is the lazily-created cl100k_base encoder (see GetTokensCount).
	tt                       *tiktoken.Tiktoken
}

// OpenAIResponseFormat selects how the model is asked to format its answer.
type OpenAIResponseFormat int

// OpenAIResponseFormatDefault leaves the answer format up to the model.
const OpenAIResponseFormatDefault OpenAIResponseFormat = 0

// OpenAIResponseFormatMarkdown appends an instruction asking the model to
// answer in Markdown (see buildPromptMessages).
const OpenAIResponseFormatMarkdown OpenAIResponseFormat = 1

// openAICompletionConfig holds the Azure OpenAI connection settings loaded
// from application config under "app.azure.openai.*"; init supplies fallback
// defaults for every field.
type openAICompletionConfig struct {
	// MaxTokens is the completion token ceiling per request
	// (init defaults it to 2048 when <= 16 and caps it at 4000).
	MaxTokens int    `json:"max_tokens,omitempty"`
	// Endpoint is the Azure OpenAI resource base URL.
	Endpoint  string `json:"endpoint,omitempty"`
	// SecretKey is the Azure OpenAI API key.
	SecretKey string `json:"secret_key,omitempty"`
	// Version is the Azure OpenAI API version string (e.g. "2023-03-15-preview").
	Version   string `json:"version,omitempty"`
	// Engin is the Azure deployment/engine name. (sic: "Engine" — the field
	// name is kept because it mirrors the "app.azure.openai.engin" config key
	// and the "engin" JSON tag.)
	Engin     string `json:"engin,omitempty"`
	// Timeout is the per-request timeout in seconds (init defaults it to 60).
	Timeout   int    `json:"timeout,omitempty"`
}

// openAICompletionResponse is the aggregated result of one completion call.
type openAICompletionResponse struct {
	// MessageID is a UUID generated for this message.
	MessageID string `json:"message_id"`
	// ConversationID groups related messages; auto-generated when the caller
	// did not supply one.
	ConversationID string `json:"conversation_id"`

	// Prompt is the caller-supplied question, verbatim.
	Prompt string `json:"prompt"`

	// PromptSent is the full message payload actually sent to OpenAI
	// (including parent-message context); excluded from JSON output.
	PromptSent string `json:"-"`
	Response   string `json:"response,omitempty"`
	Error      error  `json:"error,omitempty"`
	// Usage is this call's token/credit consumption. In stream mode the API
	// does not report usage, so it is estimated locally with tiktoken
	// (the original author noted https://github.com/sugarme/tokenizer as an
	// untested alternative).
	Usage openAICompletionResponseUsage `json:"usage"`
}

// openAICompletionResponseUsage pairs token counts with their credit cost.
type openAICompletionResponseUsage struct {
	Tokens *openAICompletionResponseUsageToken `json:"tokens"`
	// Credit is the cost derived from the per-model price table (getCredits).
	Credit float64                             `json:"credit"`
}

// openAICompletionResponseUsageToken breaks token usage down by
// prompt/completion, mirroring the OpenAI usage object.
type openAICompletionResponseUsageToken struct {
	Prompt     int `json:"prompt"`
	Completion int `json:"completion"`
	Total      int `json:"total"`
}

// openAICompletionRequest is a fluent builder for one chat-completion call;
// construct it via openaiService.NewRequest and execute it with Do.
type openAICompletionRequest struct {
	srv            *openaiService
	prompt         string
	// maxTokens overrides the configured ceiling when > 0.
	maxTokens      int
	// conversationID is carried through to the response; auto-generated if empty.
	conversationID string
	// temperature is the sampling temperature (zero value means "unset").
	temperature    float32
	// streamCallback, when non-nil, switches Do into streaming mode.
	streamCallback func(response string, eof bool, err error) bool
	// parentMessages holds prior Q/A turns supplied for conversational context.
	parentMessages []*openAICompletionParentMessage
	responseFormat OpenAIResponseFormat
}

// openAICompletionParentMessage is one prior prompt/response exchange used as
// conversational context.
type openAICompletionParentMessage struct {
	Prompt   string
	Response string
}

// GetTokensCount returns the number of cl100k_base BPE tokens in text.
// The tiktoken encoder is created lazily on first use and cached on the
// service; it returns 0 when the encoder cannot be initialized. (Previously
// the GetEncoding error was discarded with `_`, so a failed init left r.tt
// nil and the very next line panicked.)
// NOTE(review): the lazy init is not goroutine-safe — confirm callers are
// serialized, or guard with sync.Once.
func (r *openaiService) GetTokensCount(text string) int {
	if r.tt == nil {
		// tiktoken downloads/caches its model files; point it at our storage dir.
		cacheDir := helper.File.GetStorageDir("tiktoken", "cache")
		os.Setenv("TIKTOKEN_CACHE_DIR", cacheDir)
		tt, err := tiktoken.GetEncoding("cl100k_base")
		if err != nil || tt == nil {
			// Cannot count tokens without an encoder; report zero rather than panic.
			return 0
		}
		r.tt = tt
	}
	return len(r.tt.Encode(text, nil, nil))
}

// init lazily builds the service configuration (applying fallback defaults),
// the Azure OpenAI client, and the per-model price table.
// NOTE(review): not goroutine-safe — the nil checks race if NewRequest is
// called concurrently; confirm callers serialize, or guard with sync.Once.
func (r *openaiService) init() {
	if r.cfg == nil {
		r.cfg = &openAICompletionConfig{}
		r.cfg.MaxTokens = helper.Config.GetInt("app.azure.openai.maxTokens")
		// Clamp the completion ceiling into a sane range: default 2048, max 4000.
		if r.cfg.MaxTokens <= 16 {
			r.cfg.MaxTokens = 2048
		}
		if r.cfg.MaxTokens > 4000 {
			r.cfg.MaxTokens = 4000
		}
		r.cfg.Endpoint = helper.Config.GetString("app.azure.openai.endpoint")
		if r.cfg.Endpoint == "" {
			r.cfg.Endpoint = "https://wangxuteopenai-us-south.openai.azure.com/"
		}
		r.cfg.SecretKey = helper.Config.GetString("app.azure.openai.secretKey")
		if r.cfg.SecretKey == "" {
			// SECURITY(review): hardcoded API-key fallback — this credential is
			// committed to source control and should be removed and rotated;
			// prefer failing fast when the config entry is missing.
			r.cfg.SecretKey = "1a9f94d9ed7946e19d2e19fe5eca08fe"
		}
		r.cfg.Version = helper.Config.GetString("app.azure.openai.version")
		if r.cfg.Version == "" {
			r.cfg.Version = "2023-03-15-preview"
		}
		r.cfg.Engin = helper.Config.GetString("app.azure.openai.engin")
		if r.cfg.Engin == "" {
			r.cfg.Engin = "gpt-35-turbo"
		}

		// Timeout in seconds for each completion call; default 60s.
		r.cfg.Timeout = helper.Config.GetInt("app.azure.openai.timeout")
		if r.cfg.Timeout <= 0 {
			r.cfg.Timeout = 60
		}
	}

	if r.client == nil {
		cfg := gogpt.DefaultConfig(r.cfg.SecretKey)
		cfg.BaseURL = r.cfg.Endpoint
		cfg.APIType = gogpt.APITypeAzure
		cfg.Engine = r.cfg.Engin
		cfg.APIVersion = r.cfg.Version
		r.client = gogpt.NewClientWithConfig(cfg)
	}

	if r.credits == nil {
		// Price table: model name -> price per 1K tokens (presumably USD —
		// TODO confirm currency with billing).
		r.credits = gmap.NewStrAnyMapFrom(g.MapStrAny{
			gogpt.GPT3Dot5Turbo: 0.002,
		}, true)
	}
}

// getCredits converts a total token count into a credit cost using the
// per-model price table (price per 1K tokens), rounded to 5 decimal places.
// Returns 0 for models missing from the table. (Previously the unchecked
// type assertion `v.(float64)` panicked when Get returned nil for an
// unknown model.)
func (r *openaiService) getCredits(model string, totalTokens int) float64 {
	v := r.credits.Get(model)
	pricePerThousand, ok := v.(float64)
	if !ok {
		// Unknown model: no price on record, so no credit charge.
		return 0
	}
	price := pricePerThousand / 1000
	return math.Round(price*float64(totalTokens)*100000) / 100000
}

// NewRequest lazily initializes the service (config, client, price table) and
// returns a fresh completion-request builder for the given prompt.
func (r *openaiService) NewRequest(prompt string) *openAICompletionRequest {
	r.init()
	req := &openAICompletionRequest{srv: r}
	req.prompt = prompt
	return req
}

/*
AddParentMessage attaches a previous Q/A exchange so the model can answer with
conversational context (multi-turn dialogue). Order matters: add older
exchanges first.

Example:
Turn 1 — Q: "What are Li Bai's major early poems?" — no parent needed.
Turn 2 — Q: "And his later poems?" — add turn 1's prompt/response as a parent.
Turn 3 — Q: "Did he and Du Fu have a deep friendship?" — add turns 1 and 2.

Callers should limit themselves to at most two parent messages: since a
conversation can continue indefinitely, unbounded history quickly exceeds the
token budget and inflates billing.
*/
func (r *openAICompletionRequest) AddParentMessage(prompt string, response string) *openAICompletionRequest {
	parent := &openAICompletionParentMessage{Prompt: prompt, Response: response}
	r.parentMessages = append(r.parentMessages, parent)
	return r
}

// SetStreamCallback switches the request into streaming mode: fn receives each
// incremental text chunk, eof marks the final invocation, and returning false
// aborts the stream early.
func (r *openAICompletionRequest) SetStreamCallback(fn func(text string, eof bool, err error) bool) *openAICompletionRequest {
	r.streamCallback = fn
	return r
}

// SetPrompt replaces the question text for this request.
func (r *openAICompletionRequest) SetPrompt(p string) *openAICompletionRequest {
	r.prompt = p
	return r
}
// SetMaxTokens overrides the configured completion-token ceiling for this
// request; values <= 0 fall back to the service default.
func (r *openAICompletionRequest) SetMaxTokens(n int) *openAICompletionRequest {
	r.maxTokens = n
	return r
}
// SetConversationID pins the conversation identifier carried into the
// response; when unset, Do generates a fresh UUID.
func (r *openAICompletionRequest) SetConversationID(id string) *openAICompletionRequest {
	r.conversationID = id
	return r
}

// SetTemperature sets the sampling temperature for this request.
func (r *openAICompletionRequest) SetTemperature(t float32) *openAICompletionRequest {
	r.temperature = t
	return r
}

// SetResponseFormat selects the requested answer format (e.g. Markdown).
func (r *openAICompletionRequest) SetResponseFormat(f OpenAIResponseFormat) *openAICompletionRequest {
	r.responseFormat = f
	return r
}

// buildPromptMessages assembles the chat payload: each parent Q/A exchange as
// a user/assistant pair (oldest first), followed by the current question,
// optionally suffixed with a Markdown-format instruction.
func (r *openAICompletionRequest) buildPromptMessages() []gogpt.ChatCompletionMessage {
	// Two messages per parent exchange plus the final question.
	messages := make([]gogpt.ChatCompletionMessage, 0, len(r.parentMessages)*2+1)
	for _, msg := range r.parentMessages {
		messages = append(messages, gogpt.ChatCompletionMessage{
			Role:    gogpt.ChatMessageRoleUser,
			Content: msg.Prompt,
		})
		// NOTE(review): the parent RESPONSE is truncated to the length of its
		// PROMPT — capping one string by the other's length looks accidental
		// (perhaps meant to bound context size); behavior kept, confirm intent.
		messages = append(messages, gogpt.ChatCompletionMessage{
			Role:    gogpt.ChatMessageRoleAssistant,
			Content: gstr.SubStr(msg.Response, 0, len(msg.Prompt)),
		})
	}

	question := r.prompt
	if r.responseFormat == OpenAIResponseFormatMarkdown {
		question = question + "\n" + "Please use Markdown to return.\n"
	}

	// Use the library role constant, consistent with the parent messages
	// above (previously a bare "user" literal; same value, same behavior).
	messages = append(messages, gogpt.ChatCompletionMessage{
		Role:    gogpt.ChatMessageRoleUser,
		Content: question,
	})

	return messages
}

/*
Do executes the completion request and returns the aggregated response.
https://platform.openai.com/docs/api-reference/completions/create

When a streamCallback has been set, the call runs in streaming mode and the
callback receives incremental chunks; otherwise a single blocking request is
made. Errors are reported via the Error field of the returned response.
*/
func (r *openAICompletionRequest) Do() *openAICompletionResponse {
	resp := &openAICompletionResponse{
		MessageID:      uuid.New().String(),
		ConversationID: r.conversationID,
		Prompt:         r.prompt,
		Usage:          openAICompletionResponseUsage{},
	}
	// Start a fresh conversation when the caller did not supply an ID.
	if resp.ConversationID == "" {
		resp.ConversationID = uuid.New().String()
	}

	// Reject blank prompts up front.
	if gstr.Trim(r.prompt) == "" {
		resp.Error = errors.New("invalid prompt")
		return resp
	}

	// Fall back to the configured ceiling when no per-request limit is set.
	tokenLimit := r.maxTokens
	if tokenLimit <= 0 {
		tokenLimit = r.srv.cfg.MaxTokens
	}

	if r.streamCallback == nil {
		r.doCompletionChat(tokenLimit, resp)
	} else {
		r.doStreamCompletionChat(tokenLimit, resp)
	}

	// Strip leading/trailing question marks (ASCII and full-width) from the answer.
	if resp.Response != "" {
		resp.Response = gstr.Trim(resp.Response, "?？")
	}
	return resp
}

// doCompletionChat performs a single (non-streaming) chat completion call and
// fills response with the model output, token usage, and credit cost.
// On failure, response.Error is set and the remaining fields stay empty.
func (r *openAICompletionRequest) doCompletionChat(maxTokens int, response *openAICompletionResponse) {
	prompt := r.buildPromptMessages()
	response.PromptSent = gconv.String(prompt)

	// Honor SetTemperature when the caller provided a positive value;
	// otherwise keep this path's historical default of 0.7. (Previously 0.7
	// was hardcoded and SetTemperature was silently ignored here, while the
	// streaming path did honor it.)
	temperature := r.temperature
	if temperature <= 0 {
		temperature = 0.7
	}

	req := gogpt.ChatCompletionRequest{
		Model:            gogpt.GPT3Dot5Turbo,
		MaxTokens:        maxTokens,
		Messages:         prompt,
		Stream:           false, // this path only runs when no stream callback is set
		TopP:             1,
		FrequencyPenalty: 0,
		PresencePenalty:  0,
		Temperature:      temperature,
	}

	ctx := context.Background()
	if r.srv.cfg.Timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Duration(r.srv.cfg.Timeout)*time.Second)
		defer cancel()
	}

	// Retry once, but only for the configured transient TLS-handshake-timeout
	// error. Guarding against an empty marker matters: previously, when
	// tlsHandshakeTimeoutError was unset, strings.Contains(err, "") matched
	// every error and ALL failures were silently retried.
	var resp gogpt.ChatCompletionResponse
	for attempt := 0; attempt < 2; attempt++ {
		resp, response.Error = r.srv.client.CreateChatCompletion(ctx, req)
		if response.Error == nil {
			break
		}
		retryable := r.srv.tlsHandshakeTimeoutError != "" &&
			strings.Contains(response.Error.Error(), r.srv.tlsHandshakeTimeoutError)
		if !retryable {
			break
		}
	}
	if response.Error != nil {
		return
	}

	response.Usage.Tokens = &openAICompletionResponseUsageToken{
		Completion: resp.Usage.CompletionTokens,
		Prompt:     resp.Usage.PromptTokens,
		Total:      resp.Usage.TotalTokens,
	}
	response.Usage.Credit = r.srv.getCredits(req.Model, resp.Usage.TotalTokens)

	if len(resp.Choices) > 0 {
		response.Response = resp.Choices[0].Message.Content
	}
}

// doStreamCompletionChat performs a streaming chat completion call. Each delta
// is forwarded to r.streamCallback (eof=true on the final call); the callback
// returning false aborts the stream. Because the streaming API reports no
// usage figures, token usage is estimated locally via GetTokensCount.
func (r *openAICompletionRequest) doStreamCompletionChat(maxTokens int, response *openAICompletionResponse) {
	prompt := r.buildPromptMessages()
	response.PromptSent = gconv.String(prompt)
	req := gogpt.ChatCompletionRequest{
		Model:            gogpt.GPT3Dot5Turbo,
		MaxTokens:        maxTokens,
		Messages:         prompt,
		Stream:           true, // this path only runs when a stream callback is set
		TopP:             1,
		FrequencyPenalty: 0,
		PresencePenalty:  0,
		Temperature:      r.temperature,
	}

	ctx := context.Background()
	if r.srv.cfg.Timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Duration(r.srv.cfg.Timeout)*time.Second)
		defer cancel()
	}

	stream, err := r.srv.client.CreateChatCompletionStream(ctx, req)
	if err != nil {
		response.Error = err
		return
	}
	defer stream.Close()

	text := ""
	for {
		streamResponse, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			r.streamCallback(text, true, nil)
			break
		}
		if err != nil {
			// Surface the failure to the caller as well as the callback.
			// (Previously response.Error stayed nil here, so callers saw a
			// silently truncated Response with no error.)
			response.Error = err
			r.streamCallback("", true, err)
			break
		}
		// Guard against chunks that carry no choices; indexing Choices[0]
		// unconditionally panicked on such events.
		if len(streamResponse.Choices) == 0 {
			continue
		}
		delta := streamResponse.Choices[0].Delta.Content
		text += delta
		if !r.streamCallback(delta, false, nil) {
			// Caller asked to stop; the deferred Close tears the stream down
			// (the previous explicit Close here was redundant).
			break
		}
	}

	// Estimate usage from what we actually sent and received.
	completionTokens := r.srv.GetTokensCount(text)
	promptTokens := r.srv.GetTokensCount(response.PromptSent)
	totalTokens := completionTokens + promptTokens

	response.Usage.Tokens = &openAICompletionResponseUsageToken{
		Completion: completionTokens,
		Prompt:     promptTokens,
		Total:      totalTokens,
	}

	response.Usage.Credit = r.srv.getCredits(req.Model, totalTokens)
	response.Response = text
}
