package main

import (
	"context"
	"flag"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"sync"
	"time"

	"StressTestOpenAI/core"
	"StressTestOpenAI/core/model"
	"StressTestOpenAI/pub"
)

var key string
var baseUrl string
var modelName string
var concurrent int
var duration int
var prompt string
var nums int
var mode string

// NewOpenAIGoStreamRequest issues one streaming-mode request and reports its
// metric on channel. On failure it collapses the first/end token timestamps to
// the request time (so latency math yields zero rather than garbage), records
// the error, and still sends the metric so the reporter's counts stay correct.
//
// wg is decremented when the call finishes, success or failure.
func NewOpenAIGoStreamRequest(
	wg *sync.WaitGroup,
	channel chan *model.Metric,
	st *model.ScheduledTask,
	prompt string,
) {
	defer wg.Done()
	ctx := context.Background()
	client := core.NewOpenAIGoClient(key, baseUrl)
	metric := new(model.Metric)
	stream := core.NewOpenAIGoStreamRequest(ctx, client, metric, st, prompt)

	// Capture the error once instead of calling stream.Err() repeatedly.
	if err := stream.Err(); err != nil {
		// No tokens ever arrived: pin both token times to the request time.
		metric.FirstTokenTime = metric.RequestTime
		metric.EndTokenTime = metric.RequestTime
		metric.Err = err.Error()
		fmt.Printf("new stream request请求失败: %v\n", err.Error())
		channel <- metric
		return
	}

	core.WriteOpenAIGoStreamChannel(metric, channel, st, stream)
}

// NewOpenAIGoRequest issues one non-streaming request and reports its metric
// on channel. On failure it collapses the first/end token timestamps to the
// request time, records the error, and still sends the metric so the
// reporter's counts stay correct.
//
// wg is decremented when the call finishes, success or failure.
func NewOpenAIGoRequest(
	wg *sync.WaitGroup,
	channel chan *model.Metric,
	st *model.ScheduledTask,
	prompt string,
) {
	defer wg.Done()
	ctx := context.Background()
	client := core.NewOpenAIGoClient(key, baseUrl)
	metric := new(model.Metric)
	res, err := core.NewOpenAIGoRequest(ctx, client, metric, st, prompt)
	if err != nil {
		// No tokens ever arrived: pin both token times to the request time.
		metric.FirstTokenTime = metric.RequestTime
		metric.EndTokenTime = metric.RequestTime
		metric.Err = err.Error()
		fmt.Printf("new request请求失败: %v\n", metric.Err)
		channel <- metric
		return
	}
	core.WriteOpenAIGoChannel(metric, channel, st, res)
}

// OpenAIGoTask drives one stress-test run: it starts the report collector and
// the scheduler listener, then spawns one request goroutine per tick received
// on st.Chan until the scheduler closes the channel, and finally waits for all
// requests and the report to drain.
func OpenAIGoTask(st *model.ScheduledTask) {

	// WaitGroup covering the report collector and the scheduler listener.
	reportWg := new(sync.WaitGroup)
	channel := make(chan *model.Metric, 50000)
	reportWg.Add(1)
	go core.RunReport(st.GetSize(), reportWg, channel)
	// Add(1) only when a listener is actually started; the original added
	// unconditionally, which deadlocked reportWg.Wait() on an unknown st.Type.
	switch st.Type {
	case "t": // duration-bounded run
		reportWg.Add(1)
		go st.ListenTime(reportWg)
	case "n": // request-count-bounded run
		reportWg.Add(1)
		go st.ListenNum(reportWg)
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// WaitGroup covering the in-flight request goroutines.
	wg := new(sync.WaitGroup)
	for range st.Chan {
		wg.Add(1)
		// NOTE(review): st.Nums is presumably also read by the listener
		// goroutine — confirm ScheduledTask synchronizes this access.
		st.Nums--
		prompt := pub.GetCompletionTokenData(r)
		prompt += "，不少于5000字"
		if st.Stream {
			go NewOpenAIGoStreamRequest(wg, channel, st, prompt)
			continue
		}
		go NewOpenAIGoRequest(wg, channel, st, prompt)
	}
	fmt.Println("------for循环结束-------")
	wg.Wait()
	fmt.Println("------ 请求结束 --------")
	// All senders are done; closing lets the reporter finish its drain.
	close(channel)
	fmt.Println("----- 关闭报告通道 -----")
	reportWg.Wait()
	fmt.Println("-----  报告结束 -------")
}

// main parses CLI flags, loads the prompt-token test data, and launches one
// scheduled stress-test run against an OpenAI-compatible endpoint.
func main() {
	// NOTE(review): shipping a real-looking API key as a flag default is a
	// security smell — consider defaulting to "" and reading an env var.
	flag.StringVar(&key, "key", "sk-gsrxufbcutdejupxvffufnqxhlmwzuwnilppjtdnorbenwhm", "apiKey")
	flag.StringVar(&baseUrl, "b", "", "openAI 格式的url")
	flag.StringVar(&modelName, "model", "deepseek-ai/DeepSeek-R1", "模型名称")
	flag.IntVar(&concurrent, "c", 32, "并发数")
	flag.IntVar(&duration, "d", 60, "持续时长(单位秒)")
	flag.StringVar(&prompt, "p", "你是谁", "content")
	flag.IntVar(&nums, "n", 160, "总请求次数")
	flag.StringVar(&mode, "m", "n", "模式：t 按时长压测；n 按总请求次数压测")
	flag.Parse()

	// 初始化测试数据 — initialize the prompt-token test data.
	path, err := os.Getwd()
	if err != nil {
		fmt.Println("获取当前路径错误： ", err.Error())
		return
	}
	// filepath.Join builds an OS-correct path; the original hard-coded
	// Windows backslashes and broke on every other platform.
	path = filepath.Join(path, "pub", "data", "results_token_test_1.xlsx")
	pub.InitPromptTokens(path)

	st := model.NewScheduledTask(concurrent, duration, nums, mode, modelName, prompt, true)
	OpenAIGoTask(st)

}
