import itertools
import os
import argparse
import numpy as np
import pandas as pd
from transformers import AutoTokenizer, AutoModelForCausalLM
from vllm import LLM, SamplingParams
import time

def set_gpu_affinity(TP):
    """Pin the visible GPU ordering for a given tensor-parallel size.

    Sets CUDA_VISIBLE_DEVICES in this process's environment so that the
    vLLM engine created afterwards inherits it.  Each ordering presumably
    groups GPUs sharing fast interconnects for that TP layout — confirm
    against the target machine's topology.

    Args:
        TP: Tensor parallel size; one of 1, 2, 4, 8 or 16.

    Raises:
        ValueError: If TP is not a supported value.
    """
    # BUG FIX: the original used os.system('export ...'), which runs in a
    # child shell and cannot change this process's environment.  Mutating
    # os.environ does, and must happen before CUDA is initialized.
    device_orders = {
        1:  '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15',
        16: '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15',
        2:  '4,7,5,6,1,2,0,3,12,15,13,14,9,10,8,11',
        4:  '4,5,7,6,0,1,3,2,9,8,10,11,13,12,14,15',
        8:  '4,5,7,6,2,3,1,0,13,12,14,15,11,10,8,9',
    }
    if TP not in device_orders:
        raise ValueError("Unsupported TP value")
    os.environ['CUDA_VISIBLE_DEVICES'] = device_orders[TP]

def append_to_csv(results, csv_file_path):
    """Append one result row to a CSV file.

    The header row is written only when the file does not yet exist, so
    repeated calls accumulate rows under a single header.
    """
    row = pd.DataFrame([results])
    file_exists = os.path.exists(csv_file_path)
    row.to_csv(
        csv_file_path,
        mode='a' if file_exists else 'w',
        header=not file_exists,
        index=False,
    )

def run_experiment(llm, conditions, csv_file_path):
    """Run one vLLM generation benchmark per condition and log the results.

    Args:
        llm: An initialized ``vllm.LLM`` engine.
        conditions: Iterable of ``(input_size, output_size, batch_size)``
            tuples.  NOTE: ``input_size`` is measured in *characters* of the
            repeated seed text, not tokens — the actual token count depends
            on the model's tokenizer.
        csv_file_path: CSV file each condition's metrics row is appended to
            as soon as it finishes, so partial results survive a crash.

    A failure in one condition is reported and skipped so the remaining
    conditions still run (deliberate best-effort behavior).
    """
    seed = "This is a test input."
    for input_size, output_size, batch_size in conditions:
        try:
            # Build a prompt of roughly `input_size` characters.  max(1, ...)
            # guards against an empty prompt when input_size < len(seed).
            input_text = seed * max(1, input_size // len(seed))
            inputs = [input_text] * batch_size

            # Force exactly `output_size` tokens (no early EOS, greedy
            # decoding) so throughput numbers are comparable across runs.
            sampling_params = SamplingParams(
                max_tokens=output_size,
                min_tokens=output_size,
                ignore_eos=True,
                temperature=0)

            start = time.perf_counter()
            outputs = llm.generate(inputs, sampling_params)
            end = time.perf_counter()

            # Per-request metrics from vLLM's RequestMetrics timestamps.
            ttfts = []      # time to first token, per request
            tps = []        # decode-phase tokens/second, per request
            latencies = []  # end-to-end latency, per request
            for output in outputs:
                m = output.metrics
                ttfts.append(m.first_token_time - m.arrival_time)
                tps.append(output_size / (m.finished_time - m.first_token_time))
                latencies.append(m.finished_time - m.arrival_time)

            # Key order is preserved in the CSV header — keep it stable.
            result = {
                "input_size": input_size,
                "output_size": output_size,
                "batch_size": batch_size,
                "single_sum": float(np.sum(latencies)),
                "ttft_mean": float(np.mean(ttfts)),
                "tps_mean": float(np.mean(tps)),
                "ttft_min": float(np.min(ttfts)),
                "ttft_max": float(np.max(ttfts)),
                "tps_min": float(np.min(tps)),
                "tps_max": float(np.max(tps)),
                "total_time": end - start,
                # Aggregate generated tokens per wall-clock second.
                "e2e_throughput": (output_size / (end - start)) * batch_size,
            }

            # Persist immediately so earlier results are not lost if a
            # later condition crashes the process.
            append_to_csv(result, csv_file_path)

            print(f"Input Size: {input_size}, Output Size: {output_size}, Batch Size: {batch_size}\n"
                  f"First Token Latency: {result['ttft_mean']:.4f}s, Output Tokens per Second: {result['tps_mean']:.2f}, Total Time: {end-start:.4f}s\n")

        except Exception as e:
            # Best-effort: report and continue with the remaining conditions.
            print(f"An error occurred during generation with out_length={output_size}, batch_size={batch_size}: {e}")

# Run the experiment
# Run the experiment
if __name__ == "__main__":
    def _str2bool(value):
        """Parse a boolean CLI value ('true'/'false', case-insensitive)."""
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ("true", "1", "yes", "y"):
            return True
        if lowered in ("false", "0", "no", "n"):
            return False
        raise argparse.ArgumentTypeError(f"Expected a boolean, got {value!r}")

    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Run vLLM Experiment with different Tensor Parallel Sizes.')
    parser.add_argument('--tp', type=int, default=1, help='Tensor parallel size.')
    parser.add_argument('--model', type=str, default='Qwen2-0.5B', help='Model name as in local directory.')
    parser.add_argument('--test', type=str, default='test', help='The setup of test conditions.')
    # BUG FIX: argparse `type=bool` is broken — bool('False') is True, so
    # '--auto_device_select False' silently enabled the feature.  An explicit
    # converter parses the string correctly; the default is unchanged.
    parser.add_argument('--auto_device_select', type=_str2bool, default=True,
                        help='Whether to do auto CUDA_VISIBLE_DEVICES select based on TP.')

    args = parser.parse_args()
    TP = args.tp
    model = args.model
    test_cond = args.test
    auto_device_select = args.auto_device_select
    print(f"Running experiment for model={model} with TP={TP}")

    # Pin the GPU ordering before the vLLM engine initializes CUDA.
    if auto_device_select:
        set_gpu_affinity(TP)

    # Load model using vLLM.
    # TODO: change this to your own local model directory.
    model_path = f'/mnt/data/{model}'
    llm = LLM(model=model_path, dtype="half", tensor_parallel_size=TP, trust_remote_code=True)

    # Experimental grid: every combination of the three axes below.
    input_token_sizes = [256, 512]
    output_token_sizes = [256, 512]
    batch_sizes = [6, 8]
    conditions = list(itertools.product(input_token_sizes, output_token_sizes, batch_sizes))

    # One CSV per (model, TP) pair, grouped by test-condition directory.
    # os.path.join avoids the doubled slash the old f-string produced.
    result_dir = f'/mnt/data/vllm/results_{test_cond}'
    os.makedirs(result_dir, exist_ok=True)
    csv_file_path = os.path.join(result_dir, f"{model.replace('/', '-')}_TP{TP}_results.csv")

    # Run the experiment
    run_experiment(llm, conditions, csv_file_path)