package core

import (
	"StressTestOpenAI/core/model"
	"context"
	"fmt"
	openai_go "github.com/openai/openai-go"
	"github.com/openai/openai-go/option"
	"github.com/openai/openai-go/packages/ssestream"
	"strconv"
	"time"
)

// NewOpenAIGoClient builds an openai-go client pointed at the given base URL.
//
// option.WithAPIKey already makes the SDK send "Authorization: Bearer <token>"
// on every request, and the SDK sets "Content-Type: application/json" for JSON
// bodies on its own, so the previous manual WithHeader calls duplicated both
// headers and have been removed.
func NewOpenAIGoClient(token, url string) *openai_go.Client {
	return openai_go.NewClient(
		option.WithAPIKey(token),
		option.WithBaseURL(url),
	)
}

// 非stream模式
// NewOpenAIGoRequest sends a single non-streaming chat completion for the
// given prompt, stamping metric.RequestTime (ms) immediately before dispatch.
// The model name comes from the scheduled task; Seed is pinned to 0 so runs
// are as repeatable as the backend allows.
func NewOpenAIGoRequest(ctx context.Context,
	client *openai_go.Client,
	metric *model.Metric,
	st *model.ScheduledTask, prompt string) (*openai_go.ChatCompletion, error) {

	body := openai_go.ChatCompletionNewParams{
		Model: openai_go.F(st.ModelName),
		Seed:  openai_go.Int(0),
		Messages: openai_go.F([]openai_go.ChatCompletionMessageParamUnion{
			openai_go.UserMessage(prompt),
		}),
	}

	// Record the send time just before the HTTP call so TTFT/E2EL are accurate.
	metric.RequestTime = time.Now().UnixMilli()
	return client.Chat.Completions.New(ctx, body)
}

// stream 流模式
// NewOpenAIGoStreamRequest opens a streaming chat completion for the given
// prompt, stamping metric.RequestTime (ms) immediately before dispatch.
// Output is capped at 1024 completion tokens; Seed is pinned to 0 so runs
// are as repeatable as the backend allows.
func NewOpenAIGoStreamRequest(ctx context.Context,
	client *openai_go.Client,
	metric *model.Metric,
	st *model.ScheduledTask,
	prompt string) *ssestream.Stream[openai_go.ChatCompletionChunk] {

	body := openai_go.ChatCompletionNewParams{
		Model:               openai_go.F(st.ModelName),
		Seed:                openai_go.Int(0),
		MaxCompletionTokens: openai_go.Int(1024),
		Messages: openai_go.F([]openai_go.ChatCompletionMessageParamUnion{
			openai_go.UserMessage(prompt),
		}),
	}

	// Record the send time just before the HTTP call so TTFT/E2EL are accurate.
	metric.RequestTime = time.Now().UnixMilli()
	return client.Chat.Completions.NewStreaming(ctx, body)
}

// stream模式使用openai-go客户端
// WriteOpenAIGoStreamChannel drains a streaming response, records timing and
// token metrics on metric, and finally sends the metric on channel.
//
// Timing fields (all UnixMilli):
//   - FirstTokenTime: when the first chunk carrying a choice arrived.
//   - EndTokenTime:   when the finish-reason chunk arrived (or stream end).
//
// Derived metrics are rounded to two decimals via the existing
// fmt.Sprintf/strconv.ParseFloat pattern used throughout this file.
func WriteOpenAIGoStreamChannel(metric *model.Metric,
	channel chan *model.Metric, st *model.ScheduledTask,
	stream *ssestream.Stream[openai_go.ChatCompletionChunk]) {
	defer stream.Close()
	var currContent string
	acc := openai_go.ChatCompletionAccumulator{}
	sawFirstChoice := false
	for stream.Next() {
		chunk := stream.Current()
		acc.AddChunk(chunk)
		// BUG FIX: some chunks (e.g. a trailing usage-only chunk) carry no
		// choices; the old code indexed chunk.Choices[0] unconditionally and
		// panicked with index out of range. Skip choice handling for them.
		if len(chunk.Choices) == 0 {
			continue
		}
		if !sawFirstChoice {
			sawFirstChoice = true
			metric.FirstTokenTime = time.Now().UnixMilli()
		}
		if chunk.Choices[0].FinishReason != "" {
			metric.EndTokenTime = time.Now().UnixMilli()
			metric.InputTokens = chunk.Usage.PromptTokens
			metric.OutputTokens = chunk.Usage.CompletionTokens
		}
	}
	// Signal the scheduler that this request finished (success or error).
	if !st.IsClosed {
		st.Chan <- true
	}
	if err := stream.Err(); err != nil {
		metric.Err = err.Error()
	}

	// If no finish-reason chunk was seen (error / truncated stream), close the
	// timing window now so the derived metrics stay meaningful.
	if metric.EndTokenTime == 0 {
		metric.EndTokenTime = time.Now().UnixMilli()
	}

	if len(acc.Choices) > 0 {
		currContent = acc.Choices[0].Message.Content
	}

	metric.ContentLen = len(currContent)
	metric.TTFT = metric.FirstTokenTime - metric.RequestTime
	metric.TotalTokenTime = metric.EndTokenTime - metric.FirstTokenTime
	metric.E2EL = metric.EndTokenTime - metric.RequestTime
	if metric.TotalTokenTime > 0 && metric.OutputTokens > 0 {
		// BUG FIX: the old code computed float64(TotalTokenTime/1000) — integer
		// division first — which truncated sub-second streams to 0 and made
		// OutputTokenPS +Inf. Divide in float space instead.
		seconds := float64(metric.TotalTokenTime) / 1000.0
		metric.OutputTokenPS, _ = strconv.ParseFloat(fmt.Sprintf("%0.2f", float64(metric.OutputTokens)/seconds), 64)
		// BUG FIX: guard OutputTokens > 1 — with exactly one output token the
		// old divisor (OutputTokens-1) was zero, yielding +Inf for TPOT.
		if metric.OutputTokens > 1 {
			metric.TPOT, _ = strconv.ParseFloat(fmt.Sprintf("%0.2f", float64(metric.TotalTokenTime)/float64(metric.OutputTokens-1)), 64)
		}
		metric.ITL, _ = strconv.ParseFloat(fmt.Sprintf("%0.2f", float64(metric.EndTokenTime-metric.RequestTime)/float64(metric.OutputTokens)), 64)
	}
	channel <- metric
}

// 非stream模式使用openai-go客户端
// WriteOpenAIGoChannel records metrics for a completed non-streaming response
// and sends the metric on channel.
//
// In non-stream mode the whole body arrives at once, so FirstTokenTime and
// EndTokenTime are both stamped at response receipt (TotalTokenTime ~ 0 and
// the per-token rates below usually stay at their zero values).
func WriteOpenAIGoChannel(metric *model.Metric,
	channel chan *model.Metric, st *model.ScheduledTask,
	res *openai_go.ChatCompletion) {
	// Signal the scheduler that this request finished (success or error).
	if !st.IsClosed {
		st.Chan <- true
	}
	metric.FirstTokenTime = time.Now().UnixMilli()
	var content string
	if metric.EndTokenTime == 0 {
		metric.EndTokenTime = time.Now().UnixMilli()
	}
	if res != nil && len(res.Choices) > 0 {
		metric.OutputTokens = res.Usage.CompletionTokens
		metric.InputTokens = res.Usage.PromptTokens
		// BUG FIX: the old code never assigned the response text, so
		// metric.ContentLen was always 0 in non-stream mode.
		content = res.Choices[0].Message.Content
	}
	metric.ContentLen = len(content)

	metric.TTFT = metric.FirstTokenTime - metric.RequestTime
	metric.TotalTokenTime = metric.EndTokenTime - metric.FirstTokenTime
	metric.E2EL = metric.EndTokenTime - metric.RequestTime
	if metric.TotalTokenTime > 0 && metric.OutputTokens > 0 {
		// BUG FIX: the old code computed float64(TotalTokenTime/1000) — integer
		// division first — which truncated sub-second windows to 0 and made
		// OutputTokenPS +Inf. Divide in float space instead.
		seconds := float64(metric.TotalTokenTime) / 1000.0
		metric.OutputTokenPS, _ = strconv.ParseFloat(fmt.Sprintf("%0.2f", float64(metric.OutputTokens)/seconds), 64)
		// BUG FIX: guard OutputTokens > 1 — with exactly one output token the
		// old divisor (OutputTokens-1) was zero, yielding +Inf for TPOT.
		if metric.OutputTokens > 1 {
			metric.TPOT, _ = strconv.ParseFloat(fmt.Sprintf("%0.2f", float64(metric.TotalTokenTime)/float64(metric.OutputTokens-1)), 64)
		}
		metric.ITL, _ = strconv.ParseFloat(fmt.Sprintf("%0.2f", float64(metric.EndTokenTime-metric.RequestTime)/float64(metric.OutputTokens)), 64)
	}
	channel <- metric
}
