import argparse
import asyncio
import json
import random
import sys
import time
import traceback
import warnings
from dataclasses import dataclass, field
from pathlib import Path
from typing import AsyncGenerator, List, Optional, Tuple, Dict, Any
import aiohttp
import numpy as np
from tqdm.asyncio import tqdm
import os
import csv
import json
from datetime import datetime
from loguru import logger

from vllm.transformers_utils.tokenizer import get_tokenizer


AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60)

# Write benchmark results to a CSV file
def write_results_to_csv(filename: str, model_name: str, results: List[Tuple]):
    """Append benchmark result rows to a CSV file.

    The header row is written only when the target file does not exist yet or
    is empty, so repeated benchmark runs accumulate rows in the same file.

    NOTE(review): this function reads the module-level ``args`` namespace
    (created under ``__main__``) for the TP/PP/quant/data-type columns — it
    cannot be called before argument parsing.

    Args:
        filename: path of the CSV file to append to.
        model_name: value for the "Model Name" column.
        results: per-run metric tuples, one CSV row each.
    """
    header = ['Model Name', 'TP', "PP", 'Quant Type', "Test Data", 'Max Model Len', 'GMZ',
              'Prompt Length', 'Output Length', 'Request Num',
              'Output Token throughput',
              'Mean TPOT (ms)',
              "Median TPOT (ms)",
              "Total token throughput",
              'Mean TTFT (ms)',
              'Median TTFT (ms)',
              'Max TTFT (ms)',
              'Min TTFT (ms)',
              'Mean ITL (ms)',
              'Mean Latency(ms)',
              'E2E time (s)',
              "Percentiles_ttft_ms(25,50,75,99)",
              "Percentiles_itl_ms(25,50,75,99)",
              ]

    # Header is needed only for a brand-new (or still empty) file.
    file_exists = os.path.isfile(filename) and os.path.getsize(filename) > 0

    # 'a' preserves earlier runs; newline='' lets csv control line endings;
    # explicit utf-8 avoids platform-dependent default encodings.
    with open(filename, mode='a', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)

        if not file_exists:
            writer.writerow(header)

        # Prefix every row with the run configuration taken from the CLI args.
        run_config = [model_name, args.tensor_parallel_size, args.pipeline_parallel_size,
                      args.quant_type, args.data_type, args.max_model_len, args.gmz]
        for result in results:
            writer.writerow(run_config + list(result))


@dataclass
class BenchmarkMetrics:
    """Aggregate latency/throughput statistics for one benchmark run.

    All ``*_ms`` fields are milliseconds; throughput fields are tokens (or
    requests) per second; ``percentiles_*`` fields are lists of
    (percentile, value_ms) pairs. Populated by ``calculate_metrics``.
    """
    completed: int                        # number of successful requests
    total_input: int                      # prompt tokens summed over successful requests
    total_output: int                     # generated tokens summed over successful requests
    request_throughput: float             # completed / benchmark duration
    output_throughput: float              # sum of per-request output-token rates
    output_throughput_per_request: float  # mean per-request output-token rate
    total_token_throughput: float         # sum of per-request (input+output) token rates
    total_decode_token: int               # generated tokens excluding each request's first token
    decode_throughput_w_ftl: float        # decode tokens / full duration (includes TTFT)
    decode_throughput_wo_ftl: float       # decode tokens / (duration - mean TTFT)
    decode_time_wo_ftl: float             # (duration - mean TTFT) in milliseconds
    mean_ttft_ms: float
    median_ttft_ms: float
    max_ttft_ms: float
    min_ttft_ms: float
    std_ttft_ms: float
    percentiles_ttft_ms: List[Tuple[float, float]]
    mean_tpot_ms: float                   # TPOT: time per output token, excluding the first
    median_tpot_ms: float
    max_tpot_ms: float
    min_tpot_ms: float
    std_tpot_ms: float
    percentiles_tpot_ms: List[Tuple[float, float]]
    mean_itl_ms: float                    # ITL: inter-token latency between streamed chunks
    median_itl_ms: float
    max_itl_ms: float
    min_itl_ms: float
    std_itl_ms: float
    percentiles_itl_ms: List[Tuple[float, float]]
    # E2EL stands for end-to-end latency per request.
    # It is the time taken on the client side from sending
    # a request to receiving a complete response.
    mean_e2el_ms: float
    median_e2el_ms: float
    std_e2el_ms: float
    percentiles_e2el_ms: List[Tuple[float, float]]

@dataclass
class RequestFuncInput:
    """Input bundle describing one streaming chat-completion request."""
    # Prompt text sent as the user message content.
    # NOTE(review): originally annotated List[int], but every caller in this
    # file passes a str (dataset text) — annotation corrected to str.
    prompt: str
    api_url: str                 # full endpoint URL, e.g. http://host:port/v1/chat/completions
    prompt_len: int              # intended prompt length in tokens
    output_len: int              # max_tokens requested from the server
    model: str                   # model/tokenizer path
    serve_name: str              # model name sent in the request payload
    api_key: str                 # bearer token; None -> "Bearer EMPTY" is sent
    best_of: int = 1
    logprobs: Optional[int] = None
    multi_modal_content: Optional[dict] = None
    ignore_eos: bool = True      # keep generating to output_len even past EOS

@dataclass
class RequestFuncOutput:
    """Result of one streaming request, filled in by the request coroutine."""
    generated_text: str = ""   # concatenation of streamed content deltas
    success: bool = False      # set True once the [DONE] sentinel is received
    latency: float = 0.0       # end-to-end seconds from send to [DONE]
    ttft: float = 0.0  # Time to first token (seconds)
    itl: List[float] = field(default_factory=list)  # List of inter-token latencies
    prompt_len: int = 0        # prompt tokens as reported by the server usage chunk
    output_len: int = 0        # completion tokens as reported by the server usage chunk
    error: str = ""            # error message / traceback when success is False


def get_random_lens(
    input_tokens,
    output_tokens,
    max_input_tokens,
    max_output_tokens,
    num_requests,
    input_output_type,
):
    """Draw per-request input/output lengths from a normal or uniform law.

    The [input_tokens, max_input_tokens] (resp. output) interval bounds every
    draw; "normal" uses rejection sampling around the interval midpoint,
    anything else falls through to a uniform draw.

    Returns:
        (input_len_list, output_len_list) where input entries are
        [None, length] pairs (None = prompt text to be filled in later).
    """
    assert 1 <= input_tokens < max_input_tokens
    assert 1 <= output_tokens < max_output_tokens

    lo_in, lo_out = input_tokens, output_tokens
    mean_in = int((max_input_tokens + lo_in) / 2)
    std_in = int((max_input_tokens - mean_in) / 2)
    mean_out = int((max_output_tokens + lo_out) / 2)
    std_out = int((max_output_tokens - mean_out) / 2)

    def _truncated_normal(mean, std, lo, hi):
        # Rejection-sample until the draw lands inside [lo, hi].
        while True:
            draw = int(np.random.normal(mean, std))
            if lo <= draw <= hi:
                return draw

    input_len_list = []
    output_len_list = []
    for _ in range(num_requests):
        if input_output_type == "normal":
            in_len = _truncated_normal(mean_in, std_in, lo_in, max_input_tokens)
            out_len = _truncated_normal(mean_out, std_out, lo_out, max_output_tokens)
        else:
            in_len = int(np.random.uniform(lo_in, max_input_tokens))
            out_len = int(np.random.uniform(lo_out, max_output_tokens))

        input_len_list.append([None, in_len])
        output_len_list.append(out_len)

    return input_len_list, output_len_list


def sample_requests(
    num_requests: int,
    input_tokens: int,
    output_tokens: int,
    max_input_tokens: int,
    max_output_tokens: int,
    input_output_type: str = "fix",
    seed: int = 42,
) -> Tuple[List[Tuple[str, int]], List[int]]:
    """Generate per-request input/output length specs.

    "fix" repeats the given lengths for every request; "normal"/"uniform"
    delegate to ``get_random_lens``. Seeds both numpy and stdlib RNGs for
    reproducibility.

    Returns:
        (input_len_list, output_len_list); input entries are two-item lists
        of [prompt_or_None, length].
    """
    np.random.seed(seed)
    random.seed(seed)

    if input_output_type == "fix":
        # Identical lengths for every request.
        input_len_list = [[None, input_tokens] for _ in range(num_requests)]
        output_len_list = [output_tokens] * num_requests
    elif input_output_type in ("normal", "uniform"):
        input_len_list, output_len_list = get_random_lens(
            input_tokens,
            output_tokens,
            max_input_tokens,
            max_output_tokens,
            num_requests,
            input_output_type,
        )
    else:
        raise NotImplementedError("You can modify this code according to your needs")

    # Sanity-check the generated shapes: [str|None, int] plus int lengths.
    for prompt_and_len in input_len_list:
        assert len(prompt_and_len) == 2
        assert isinstance(prompt_and_len[0], str) or prompt_and_len[0] is None
        assert isinstance(prompt_and_len[1], int)
    for out_len in output_len_list:
        assert isinstance(out_len, int)

    return input_len_list, output_len_list


async def get_request(
    input_requests: List[RequestFuncInput],
    time_interval: float,
):
    """Yield requests one at a time, pacing them by ``time_interval`` seconds.

    A zero interval yields back-to-back without ever awaiting a sleep.
    """
    for request in input_requests:
        yield request

        if time_interval != 0:
            await asyncio.sleep(time_interval)


def remove_prefix(text: str, prefix: str) -> str:
    """Strip ``prefix`` from the start of ``text`` if present, else return unchanged."""
    if text.startswith(prefix):
        return text[len(prefix):]
    return text


def remove_suffix(text: str, suffix: str) -> str:
    """Strip ``suffix`` from the end of ``text`` if present, else return unchanged."""
    if text.endswith(suffix):
        return text[: -len(suffix)]
    return text


def get_auth_headers() -> Dict[str, str]:
    """Build an Authorization header from ``OPENAI_API_KEY``; empty dict if unset."""
    api_key = os.environ.get("OPENAI_API_KEY")
    return {"Authorization": f"Bearer {api_key}"} if api_key else {}


async def async_request_openai_completions(
    request_func_input: RequestFuncInput,
    pbar: Optional[tqdm] = None,
) -> RequestFuncOutput:
    """Send one streaming chat-completion request and record latency metrics.

    Measures TTFT (first streamed content chunk) and per-chunk inter-token
    latencies. Token counts come from the final ``usage`` chunk, which the
    server emits because the payload sets ``stream_options.include_usage``.

    Args:
        request_func_input: request description (prompt, lengths, endpoint...).
        pbar: optional progress bar, advanced once per finished request.

    Returns:
        RequestFuncOutput with success flag, latencies, and generated text;
        on failure ``error`` holds the reason or a formatted traceback.
    """
    api_url = request_func_input.api_url
    assert api_url.endswith(
        ("completions", "profile")
    ), "OpenAI Completions API URL must end with 'completions' or 'profile'."


    async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session:
        payload = {
            "model": request_func_input.serve_name,
            "messages": [{"role": "user","content": request_func_input.prompt}],
            "temperature": 0.0,
            "best_of": request_func_input.best_of,
            "max_tokens": request_func_input.output_len,
            "logprobs": request_func_input.logprobs,
            "stream": True,
            "ignore_eos": request_func_input.ignore_eos,
            "stream_options": {"include_usage": True},
        }

        # vLLM accepts any bearer token when auth is disabled, so fall back
        # to a dummy value rather than omitting the header.
        if request_func_input.api_key is None:
            api_key = "Bearer EMPTY"
        else:
            api_key = f"Bearer {request_func_input.api_key}"

        headers = {
            "Content-Type": "application/json",
            "Authorization": api_key
        }

        output = RequestFuncOutput()
        st = time.perf_counter()
        most_recent_timestamp = st
        try:
            async with session.post(url=api_url, json=payload, headers=headers) as response:
                if response.status == 200:
                    async for chunk_bytes in response.content:
                        # Skip SSE keep-alives and blank separator lines.
                        if not chunk_bytes or chunk_bytes in {b'\r', b'\n'} or b': ping - ' in chunk_bytes:
                            continue
                        if b"local_rate_limited" in chunk_bytes:
                            logger.info(f"{chunk_bytes}, local rate limited, break")
                            break

                        chunk = remove_prefix(chunk_bytes.decode('utf-8'), "data:").strip()
                        if chunk == "[DONE]":
                            output.latency = time.perf_counter() - st
                            output.success = True
                        else:
                            data = json.loads(chunk)
                            # Server-side error reported inside the stream body.
                            if data.get("status", {}).get("code") == 500:
                                logger.error(f"Request failed with status code {response.status}: {chunk}")
                                output.error = f"{response.reason} {chunk}".strip()
                                output.success = False
                                break

                            choices = data.get("choices", [])
                            if not choices:
                                # The final chunk carries only usage statistics.
                                # BUGFIX: also skip empty-choice chunks without
                                # usage instead of indexing choices[0] below.
                                usage = data.get("usage")
                                if usage:
                                    output.prompt_len = usage["prompt_tokens"]
                                    output.output_len = usage["completion_tokens"]
                                continue

                            delta = choices[0].get("delta", {})
                            # BUGFIX: use .get() — role-only / empty deltas have
                            # no "content" key and raised KeyError before.
                            if delta.get("content") == "<think>":
                                continue
                            timestamp = time.perf_counter()
                            if "content" in delta:
                                if output.ttft == 0.0:
                                    # Use the same timestamp as ITL bookkeeping
                                    # instead of a second perf_counter() call.
                                    output.ttft = timestamp - st
                                else:
                                    output.itl.append(timestamp - most_recent_timestamp)
                                output.generated_text += delta["content"]
                            most_recent_timestamp = timestamp
                else:
                    error_message = ''.join([chunk.decode('utf-8').strip() async for chunk in response.content])
                    output.error = f"{response.reason} {error_message}".strip()
                    logger.error(f"Request failed with status code {response.status}: {output.error}")
                    output.success = False
        except Exception:
            # Record the full traceback so the caller can report it per request.
            output.success = False
            exc_info = sys.exc_info()
            output.error = "".join(traceback.format_exception(*exc_info))

    if pbar:
        pbar.update(1)
    return output


async def benchmark(
    input_requests: List[RequestFuncInput],
    time_interval: float,
) -> List[RequestFuncOutput]:
    """Fire all requests (paced by ``time_interval`` seconds) and gather results.

    Requests run concurrently: each is scheduled as a task as soon as the
    pacing generator yields it; all tasks are awaited together at the end.
    (Return annotation fixed: this returns the request outputs, not None.)
    """
    pbar = tqdm(total=len(input_requests))
    tasks: List[asyncio.Task] = []
    async for request in get_request(input_requests, time_interval):
        task = asyncio.create_task(async_request_openai_completions(request,pbar=pbar))
        tasks.append(task)
    outputs = await asyncio.gather(*tasks)
    return outputs


def calculate_metrics(
    outputs: List[RequestFuncOutput],
    dur_s: float,
    tokenizer,
    selected_percentile_metrics: List[str],
    selected_percentiles: List[float],
) -> Tuple[BenchmarkMetrics, List[int]]:
    """Aggregate per-request outputs into a BenchmarkMetrics summary.

    Args:
        outputs: per-request results from the benchmark run.
        dur_s: wall-clock duration of the whole run, in seconds.
        tokenizer: unused here; kept for interface compatibility.
        selected_percentile_metrics: unused here; the caller filters which
            metrics to print.
        selected_percentiles: percentiles (e.g. [25, 50, 75, 99]) computed
            for TTFT/TPOT/ITL/E2EL.

    Returns:
        (metrics, actual_output_lens) — one output length per request,
        0 for failed requests.
    """
    actual_output_lens: List[int] = []
    total_decode_lens: List[int] = []
    total_input = 0
    completed = 0
    itls: List[float] = []
    tpots: List[float] = []
    ttfts: List[float] = []
    e2els: List[float] = []
    tps_requests = []
    total_tps_requests = []

    for output in outputs:
        if not output.success:
            actual_output_lens.append(0)
            continue

        output_len = output.output_len
        prompt_len = output.prompt_len
        actual_output_lens.append(output_len)
        # Decode tokens exclude the first token (which is covered by TTFT).
        total_decode_lens.append(output_len - 1)
        total_input += prompt_len

        if output_len > 1:
            tpots.append((output.latency - output.ttft) / (output_len - 1))

        itls += output.itl
        ttfts.append(output.ttft)
        e2els.append(output.latency)

        # Per-request token rates derived from streaming timestamps.
        stream_time = output.ttft + sum(output.itl)
        tps_requests.append(output_len / stream_time)
        total_tps_requests.append((output_len + prompt_len) / stream_time)
        completed += 1

    if completed == 0:
        warnings.warn(
            "All requests failed. This is likely due to a misconfiguration "
            "on the benchmark arguments.",
            stacklevel=2)

    metrics = BenchmarkMetrics(
        completed=completed,
        total_input=total_input,
        total_output=sum(actual_output_lens),
        total_decode_token=sum(total_decode_lens),
        decode_throughput_w_ftl=sum(total_decode_lens)/dur_s,
        decode_throughput_wo_ftl=sum(total_decode_lens)/(dur_s-np.mean(ttfts)),
        decode_time_wo_ftl=round((dur_s-np.mean(ttfts)) * 1000, 2),
        request_throughput=completed / dur_s,
        output_throughput=sum(tps_requests),
        output_throughput_per_request=np.mean(tps_requests),
        total_token_throughput=sum(total_tps_requests),
        # "or 0" guards the empty case (e.g. streaming unsupported by backend).
        mean_ttft_ms=np.mean(ttfts or 0) * 1000,
        std_ttft_ms=np.std(ttfts or 0) * 1000,
        median_ttft_ms=np.median(ttfts or 0) * 1000,
        max_ttft_ms=np.max(ttfts or 0) * 1000,
        min_ttft_ms=np.min(ttfts or 0) * 1000,
        percentiles_ttft_ms=[(p, round(np.percentile(ttfts or 0, p) * 1000,2)) for p in selected_percentiles],
        mean_tpot_ms=np.mean(tpots or 0) * 1000,
        max_tpot_ms=np.max(tpots or 0) * 1000,
        min_tpot_ms=np.min(tpots or 0) * 1000,
        std_tpot_ms=np.std(tpots or 0) * 1000,
        median_tpot_ms=np.median(tpots or 0) * 1000,
        percentiles_tpot_ms=[(p, round(np.percentile(tpots or 0, p) * 1000,2)) for p in selected_percentiles],
        mean_itl_ms=np.mean(itls or 0) * 1000,
        max_itl_ms=np.max(itls or 0) * 1000,
        min_itl_ms=np.min(itls or 0) * 1000,
        std_itl_ms=np.std(itls or 0) * 1000,
        median_itl_ms=np.median(itls or 0) * 1000,
        percentiles_itl_ms=[(p, round(np.percentile(itls or 0, p) * 1000,2)) for p in selected_percentiles],
        # BUGFIX: mean/median were swapped here — mean_e2el_ms used np.median
        # and median_e2el_ms used np.mean.
        mean_e2el_ms=np.mean(e2els or 0) * 1000,
        std_e2el_ms=np.std(e2els or 0) * 1000,
        median_e2el_ms=np.median(e2els or 0) * 1000,
        percentiles_e2el_ms=[(p, round(np.percentile(e2els or 0, p) * 1000,2)) for p in selected_percentiles],
    )

    return metrics, actual_output_lens


def get_datasets(sent_len, num_promt, tokenizer, file_path):
    """Load JSONL rows and build prompts of exactly ``sent_len`` tokens.

    Three tokens are reserved for the chat special tokens
    (<begin_of_sentence><User>...<Assistant>). The context is truncated so
    that context + question totals ``sent_len - 3`` tokens, and only rows
    whose re-encoded length matches exactly are kept. Returns ``num_promt``
    shuffled prompts; asserts enough rows survived the exact-length filter.
    """
    target_len = sent_len - 3  # room for the 3 chat special tokens
    datasets = []
    with open(file_path, 'r', encoding='utf-8') as file:
        for line in file:
            data = json.loads(line.strip())  # strip possible surrounding whitespace
            context = data['context']
            promt = data['input']
            context_ids = tokenizer.encode(context, add_special_tokens=False)
            promt_ids = tokenizer.encode(promt, add_special_tokens=False)

            # Token budget left for the context after the question.
            budget = target_len - len(promt_ids)
            trimmed_context = tokenizer.decode(context_ids[:budget])

            candidate = trimmed_context + promt
            # Re-tokenizing can merge tokens at the join point, so keep only
            # candidates whose length matches exactly.
            if len(tokenizer.encode(candidate, add_special_tokens=False)) == target_len:
                datasets.append(candidate)

    assert num_promt <= len(datasets)
    random.shuffle(datasets)
    return datasets[:num_promt]


async def main(args):
    """Run one benchmark round: build prompts, fire requests, report metrics.

    Samples per-request lengths from ``args``, builds prompts from the
    real-data file, optionally warms the server up with a single request,
    runs the measured batch, prints a summary, and appends results to both a
    CSV file and a JSONL file.
    """
    tokenizer = get_tokenizer(args.model, trust_remote_code=True)
    api_url = f"http://{args.host}:{args.port}/v1/chat/completions"

    np.random.seed(args.seed)
    random.seed(args.seed)

    input_tokens = args.input_tokens
    output_tokens = args.output_tokens

    input_list, output_list = sample_requests(
        args.num_prompts,
        input_tokens,
        output_tokens,
        args.max_input_tokens,
        args.max_output_tokens,
        args.input_output_type,
        args.seed,
    )

    datasets = get_datasets(input_tokens, args.num_prompts, tokenizer, args.data_path)

    prompts = []
    for i in range(args.num_prompts):
        # input_list entries are [prompt_or_None, length]; None means "use a
        # real-data prompt", a str is a pre-built test prompt.
        prompt_text = datasets[i] if input_list[i][0] is None else input_list[i][0]
        prompts.append(
            RequestFuncInput(
                prompt_text,
                api_url,
                input_list[i][1],
                output_list[i],
                args.model,
                args.serve_name,
                args.api_key,
                1,
                None,
                None,
                True,
            )
        )

    if args.warnup:
        # Warm up with a single request so server-side initialization cost
        # does not pollute the measured run.
        print("warm up start ...")
        await benchmark(prompts[:1], args.time_interval)
        print("warm up end ...")
        args.warnup = False

    print(f"input token length: {input_tokens}, generate tokens len: {output_tokens}, batch size: {args.num_prompts}")

    # Measured benchmark run.
    benchmark_start_time = time.perf_counter()
    outputs = await benchmark(prompts, args.time_interval)
    benchmark_end_time = time.perf_counter()
    benchmark_duration = benchmark_end_time - benchmark_start_time

    selected_percentile_metrics = args.percentile_metrics.split(",")
    selected_percentiles = [float(p) for p in args.metric_percentiles.split(",")]
    metrics, actual_output_lens = calculate_metrics(
        outputs=outputs,
        dur_s=benchmark_duration,
        tokenizer=tokenizer,
        selected_percentile_metrics=selected_percentile_metrics,
        selected_percentiles=selected_percentiles
    )

    print("{s:{c}^{n}}".format(s=' Serving Benchmark Result ', n=50, c='='))
    print("{:<40} {:<10}".format("Successful requests:", metrics.completed))
    print("{:<40} {:<10.2f}".format("Request throughput (req/s):", metrics.request_throughput))
    print("{:<40} {:<10.2f}".format("Benchmark duration (s):", benchmark_duration))
    print("{:<40} {:<10}".format("Total input tokens:", metrics.total_input))
    print("{:<40} {:<10}".format("Total generated tokens:", metrics.total_output))
    print("{:<40} {:<10.2f}".format("Output Token throughput (tok/s):", metrics.output_throughput))
    print("{:<40} {:<10.2f}".format("Total Token throughput (tok/s):", metrics.total_token_throughput))

    # Collect the CSV row for this run.
    all_results = [(input_tokens, output_tokens, args.num_prompts,
            round(metrics.output_throughput, 2),
            round(metrics.mean_tpot_ms, 2),
            round(metrics.median_tpot_ms, 2),
            round(metrics.total_token_throughput, 2),
            round(metrics.mean_ttft_ms, 2),
            round(metrics.median_ttft_ms, 2),
            round(metrics.max_ttft_ms, 2),
            round(metrics.min_ttft_ms, 2),
            round(metrics.mean_itl_ms, 2),
            round(metrics.mean_e2el_ms, 2),
            round(benchmark_duration, 2),
            metrics.percentiles_ttft_ms,
            metrics.percentiles_itl_ms,
    )]

    # Append the run to the CSV results file.
    write_results_to_csv(args.output_csv, args.model_name, all_results)

    result = {
        "duration": benchmark_duration,
        "completed": metrics.completed,
        "total_input_tokens": metrics.total_input,
        "total_output_tokens": metrics.total_output,
        "request_throughput": metrics.request_throughput,
        "output_throughput": metrics.output_throughput,
        "output_throughput_per_request": metrics.output_throughput_per_request,
        "total_token_throughput": metrics.total_token_throughput,
        "input_lens": [output.prompt_len for output in outputs],
        "output_lens": [output.output_len for output in outputs],
        "ttfts": [output.ttft for output in outputs],
        "itls": [output.itl for output in outputs],
        "latency": [output.latency for output in outputs],
        "generated_texts": [output.generated_text for output in outputs],
        "errors": [output.error for output in outputs],
    }

    def process_one_metric(
        # E.g., "ttft"
        metric_attribute_name: str,
        # E.g., "TTFT"
        metric_name: str,
        # E.g., "Time to First Token"
        metric_header: str,
    ):
        # Print the statistics of one metric (if selected) and add them to
        # the JSON result dict, including the configured percentiles.
        if metric_attribute_name not in selected_percentile_metrics:
            return

        print("{s:{c}^{n}}".format(s=metric_header, n=50, c='-'))
        print("{:<40} {:<10.2f}".format(f"Mean {metric_name} (ms):",getattr(metrics, f"mean_{metric_attribute_name}_ms")))
        print("{:<40} {:<10.2f}".format(f"Median {metric_name} (ms):",getattr(metrics, f"median_{metric_attribute_name}_ms")))
        print("{:<40} {:<10.2f}".format(f"Max {metric_name} (ms):",getattr(metrics, f"max_{metric_attribute_name}_ms")))
        print("{:<40} {:<10.2f}".format(f"Min {metric_name} (ms):",getattr(metrics, f"min_{metric_attribute_name}_ms")))

        result[f"mean_{metric_attribute_name}_ms"] = getattr(metrics, f"mean_{metric_attribute_name}_ms")
        result[f"median_{metric_attribute_name}_ms"] = getattr(metrics, f"median_{metric_attribute_name}_ms")
        result[f"std_{metric_attribute_name}_ms"] = getattr(metrics, f"std_{metric_attribute_name}_ms")

        for p, value in getattr(metrics,f"percentiles_{metric_attribute_name}_ms"):
            p_word = str(int(p)) if int(p) == p else str(p)
            print("{:<40} {:<10.2f}".format(f"P{p_word} {metric_name} (ms):",value))
            result[f"p{p_word}_{metric_attribute_name}_ms"] = value

    process_one_metric("ttft", "TTFT", "Time to First Token")
    process_one_metric("itl", "ITL", "Inter-token Latency")
    process_one_metric("tpot", "TPOT","Time per Output Token (excl. 1st token)")
    process_one_metric("e2el", "E2EL", "End-to-end Latency")
    print("=" * 50)

    # Determine the JSONL output file name. BUGFIX: the original referenced
    # the nonexistent ``args.output_file`` (AttributeError when --output-json
    # was set) and then unconditionally rebuilt the name, leaving ``now``
    # unbound in the --output-json branch.
    if args.output_json:
        output_file_name = args.output_json
    else:
        now = datetime.now().strftime("%m%d")
        output_file_name = f"vllm_{now}_{args.num_prompts}_{input_tokens}_{output_tokens}.jsonl"

    # Append results to a JSONL file.
    with open(output_file_name, "a") as file:
        file.write(json.dumps(result) + "\n")
    


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, default=None)
    parser.add_argument("--model-name", type=str, default="R1")
    parser.add_argument("--serve-name", type=str, default=None)
    parser.add_argument("--api-key", type=str, default=None)
    parser.add_argument("--num-prompts", type=int, default=128)
    parser.add_argument("--input-tokens", type=int, default=128)
    parser.add_argument(
        "--max-input-tokens",
        type=int,
        default=-1,
        help="Use for generate random length of input, limit min input length",
    )
    parser.add_argument("--output-tokens", type=int, default=128)
    parser.add_argument(
        "--max-output-tokens",
        type=int,
        default=-1,
        help="Use for generate random length of output, limit max output length",
    )
    parser.add_argument("--input-output-type", type=str, default="fix", choices=['fix', 'normal', 'uniform'])
    parser.add_argument("--data_path", type=str, default="./multifieldqa.jsonl")
    parser.add_argument("--host", type=str, default="127.0.0.1")
    parser.add_argument("--port", type=int, default=8000)
    parser.add_argument("--time-interval", type=float, default=0.0)
    parser.add_argument("--target", type=int, default=None)
    parser.add_argument("--seed", type=int, default=12345)
    # BUGFIX: help texts below now match the actual defaults (they previously
    # claimed "ttft,tpot,itl" and "99").
    parser.add_argument(
        "--percentile-metrics",
        type=str,
        default="ttft,tpot",
        help="Comma-separated list of selected metrics to report percentiles. "
        "This argument specifies the metrics to report percentiles. "
        "Allowed metric names are \"ttft\", \"tpot\", \"itl\", \"e2el\". "
        "Default value is \"ttft,tpot\".")
    parser.add_argument(
        "--metric-percentiles",
        type=str,
        default="25,50,75,99",
        help="Comma-separated list of percentiles for selected metrics. "
        "To report 25-th, 50-th, and 75-th percentiles, use \"25,50,75\". "
        "Default value is \"25,50,75,99\". "
        "Use \"--percentile-metrics\" to select metrics.",
    )
    # NOTE(review): default=True combined with store_true makes this flag a
    # no-op — warm-up is always enabled. Confirm whether an opt-out (e.g.
    # BooleanOptionalAction) was intended; behavior kept unchanged here.
    parser.add_argument('--warnup', default=True, action='store_true')
    parser.add_argument('--real_data', default=False, action='store_true')
    parser.add_argument("--tensor-parallel-size", "-tp", type=int, default=16)
    parser.add_argument("--pipeline-parallel-size", "-pp", type=int, default=1)
    parser.add_argument("--max-model-len", type=int, default=1)
    parser.add_argument("--gmz", type=str, default="")
    parser.add_argument("--quant-type", type=str, default="")
    parser.add_argument("--data-type", type=str, default="")
    parser.add_argument(
        '--output-csv',
        type=str,
        default='results-zrl.csv',
        help="Path to save the CSV file with test results")
    parser.add_argument(
        '--output-json',
        type=str,
        default=None,
        help="Path to save the json file with test results")
    parser.add_argument('--max-concurrency', nargs="+", type=int,
        help='A list, Maximum number of concurrent requests. default is'
            ' [1, 2, 4, 8, 16, 32, 64, 128, 256] ',
        default=[1, 1, 2, 4, 8, 16, 32, 64, 128]
        )
    parser.add_argument(
        '--task-list',
        type=str,
        required=True,
        help='JSON formatted task list specifying prompt and output length')

    args = parser.parse_args()

    # Label the run by data source for the CSV "Test Data" column.
    if args.real_data:
        args.data_type = "real"
    else:
        args.data_type = "fake"

    if args.serve_name is None:
        args.serve_name = args.model

    # Sweep every (input_len, output_len) task over every concurrency level.
    task_list = json.loads(args.task_list)
    for (in_len, out_len) in task_list:
        for bs in args.max_concurrency:
            args.num_prompts = bs
            args.input_tokens = in_len
            args.output_tokens = out_len
            asyncio.run(main(args))
