from openai import OpenAI
import time
from locust import TaskSet, HttpUser, task, run_single_user, events
import random


def send_stream(query):
    """Send *query* as a streaming chat-completion request and time it.

    Parameters:
        query: the user prompt string.

    Returns a 5-tuple:
        all_time           -- total wall-clock time of the request (seconds)
        first_elapsed_time -- time to first streamed token (seconds; 0.0 if
                              the stream produced no reasoning token)
        tokens_per_second  -- output-token throughput excluding the
                              time-to-first-token (0.0 if not measurable)
        prompt_tokens      -- input token count reported by the API
        completion_tokens  -- output token count reported by the API
    """
    # SECURITY NOTE(review): hard-coded API key checked into source — move it
    # to an environment variable (e.g. os.environ["DASHSCOPE_API_KEY"]) and
    # rotate this key.
    client = OpenAI(
        api_key="sk-30a77ee37da8436596edbc57242afc251",
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    )

    response = client.chat.completions.create(
        model="deepseek-r1",
        messages=[{'role': 'system', 'content': 'You are a helpful assistant.'},
                  {'role': 'user', 'content': query}],
        stream=True,
        # Ask the API to append a trailing usage chunk so token counts can
        # be read for billing/throughput accounting.
        stream_options={
            "include_usage": True
        }
    )

    first_packet_time = None    # wall-clock time of the first streamed token
    first_elapsed_time = 0.0    # time-to-first-token; stays 0.0 if none arrives
    start_time = time.time()
    completion_tokens = 0       # output token count (from the usage chunk)
    prompt_tokens = 0           # input token count (from the usage chunk)

    for chunk in response:
        if not chunk.choices:
            # Trailing usage chunk: carries token accounting, no content.
            # chunk.usage can be None on some empty chunks, so guard it.
            if chunk.usage is not None:
                completion_tokens = chunk.usage.completion_tokens
                prompt_tokens = chunk.usage.prompt_tokens
            continue

        delta = chunk.choices[0].delta
        # deepseek-r1 streams its chain-of-thought in `reasoning_content`
        # before any answer content, so the first non-empty reasoning chunk
        # marks the time-to-first-token. getattr guards models whose deltas
        # lack the attribute entirely.
        reasoning = getattr(delta, "reasoning_content", None)
        if reasoning:
            if first_packet_time is None:
                first_packet_time = time.time()
                first_elapsed_time = first_packet_time - start_time
        elif delta.content is not None:
            # Answer tokens: echo them as they arrive.
            print(delta.content, end="")

    end_time = time.time()
    # Throughput excludes time-to-first-token. If no reasoning token ever
    # arrived, fall back to the full request duration; if the duration is
    # zero, report 0.0 instead of raising ZeroDivisionError.
    anchor = first_packet_time if first_packet_time is not None else start_time
    gen_time = end_time - anchor
    tokens_per_second = completion_tokens / gen_time if gen_time > 0 else 0.0
    all_time = end_time - start_time  # total request time, used for QPM

    print(f"每秒输出token数量: {tokens_per_second:.2f}")

    return all_time, first_elapsed_time, tokens_per_second, prompt_tokens, completion_tokens


class ModelRequestSet(TaskSet):
    """Task set executed by each simulated locust user.

    Picks a random prompt from the shared pool, streams it through the
    model, and reports five custom metrics: time-to-first-token, token
    throughput, total latency, and input/output token counts.
    """

    @task
    def send_request(self):
        prompt = random.choice(self.user.share_data).strip()

        try:
            total, ttft, tps, in_tok, out_tok = send_stream(prompt)
            # response_time is repurposed to graph custom values (seconds,
            # tokens/s, token counts) in the locust statistics UI.
            metric_rows = (
                ("first", ttft, 0),
                ("token_per_second", tps, 0),
                ("whole", total, 0),
                ("input_token", in_tok, in_tok),
                ("output_token", out_tok, out_tok),
            )
            for metric_name, value, length in metric_rows:
                events.request.fire(request_type="http", name=metric_name,
                                    response_time=value,
                                    response_length=length)
        except Exception as err:
            # Surface the failure as its own metric so model-call errors show
            # up in the locust report instead of crashing the user instance.
            events.request.fire(request_type="http", name="error",
                                response_time=1 * 1000, response_length=0,
                                exception=f"大模型调用错误:{err}")
            print(err)

def get_test_data():
    """Return the pool of test prompts the load test draws from."""
    prompts = [
        '你是谁',
        "今天北京天气如何",
        "胃疼怎么办",
    ]
    return prompts


class ModelUser(HttpUser):
    """Locust user class.

    - Locust spawns concurrent instances of this class.
    - Each spawned instance executes the configured task set according to
      locust's scheduling rules.
    """
    # Task sets this user runs (locust selects from this list).
    tasks = [ModelRequestSet, ]
    # Required by HttpUser but effectively unused here: requests go through
    # the OpenAI SDK in send_stream, not locust's HTTP client.
    host = 'http://www.baidu.com'
    # Prompt pool shared by all user instances; tasks read it via
    # self.user.share_data.
    share_data = get_test_data()