import itertools
import torch
import os
import argparse
import numpy as np
import pandas as pd
from transformers import AutoTokenizer, AutoModelForCausalLM
from vllm import LLM, SamplingParams
import time


# Run the experiment
if __name__ == "__main__":

    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Run vLLM Experiment with different Tensor Parallel Sizes.')
    parser.add_argument('--tp', type=int, default=1, help='Tensor parallel size.')
    parser.add_argument('--model', type=str, default='Qwen2.5-14B', help='Model name as in local directory.')
    parser.add_argument('--test', type=str, default='test', help='The setup of test conditions.')
    parser.add_argument('--auto_device_select', type=bool, default=True, help='Whether to do auto CUDA_VISIBLE_DEVICES select based on TP.')

    args = parser.parse_args()
    TP = args.tp
    model = args.model
    test_cond = args.test
    auto_device_select = args.auto_device_select

    print(f"Running experiment for model={model} with TP={TP}")

    # Resolve the checkpoint directory from the model name.
    #
    # BUG FIX: the hard-coded dev overrides that used to live here (TP=4,
    # model='Qwen2.5-70B', test_cond='batch_3', ...) silently discarded the
    # CLI arguments parsed above, so --tp/--model/--test had no effect and the
    # banner printed misleading values. They have been removed so the CLI is
    # authoritative again.
    #
    # NOTE(review): the directory layout below is inferred from the paths the
    # overrides previously used (glm-ckpts/ for GLM, qwen-ckpts/ for Qwen,
    # a fixed HF dataset path for llama2-7B) — confirm when adding new model
    # families.
    if model.lower().startswith('glm'):
        model_path = f'/mnt/workspace/glm-ckpts/{model}'
    elif model.lower().startswith('llama2'):
        model_path = '/mnt/workspace/datasets/huggingface/Llama-2-7b-hf'
    else:
        model_path = f'/mnt/workspace/qwen-ckpts/{model}'

    # Set the GPU affinity according to TP.
    #
    # BUG FIX: the previous implementation ran
    # `os.system('export CUDA_VISIBLE_DEVICES=...')`, which executes in a
    # throw-away child shell and never changes this process's environment —
    # the device selection silently did nothing. The variable must be set via
    # os.environ, and it must happen before any CUDA context is created (the
    # vLLM engine constructed below initializes the GPUs).
    if auto_device_select:
        # NOTE(review): the non-trivial orderings look topology-specific
        # (NVLink/PCIe grouping on a 16-GPU host) — confirm against the
        # target machine before reusing elsewhere.
        _DEVICE_ORDER = {
            1:  '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15',
            16: '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15',
            2:  '4,7,5,6,1,2,0,3,12,15,13,14,9,10,8,11',
            4:  '4,5,7,6,0,1,3,2,9,8,10,11,13,12,14,15',
            8:  '4,5,7,6,2,3,1,0,13,12,14,15,11,10,8,9',
        }
        if TP not in _DEVICE_ORDER:
            raise ValueError("Unsupported TP value")
        os.environ['CUDA_VISIBLE_DEVICES'] = _DEVICE_ORDER[TP]

    # Instantiate the vLLM engine. float16 ("half") sidesteps bfloat16
    # compatibility problems on some GPUs.
    llm = LLM(
        model=model_path,
        dtype="half",
        tensor_parallel_size=TP,
        trust_remote_code=True,
    )

    # Experiment grids keyed by test condition name:
    # (input token sizes, output token sizes, batch sizes).
    _GRIDS = {
        'batch_1':  ([256, 512, 1000, 1500, 3000, 10000], [128],
                     [1, 4, 8, 16, 32, 64, 128, 256, 512]),
        'batch_2':  ([256, 512, 1000, 1500, 3000, 10000], [256],
                     [1, 4, 8, 16, 32, 64, 128, 256, 512]),
        'batch_3':  ([512], [512, 1024, 4096], [64]),
        'maxbatch': ([512], [128],
                     [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]),
    }
    # Unknown condition names fall back to a small smoke-test grid.
    input_token_sizes, output_token_sizes, batch_sizes = _GRIDS.get(
        test_cond, ([256, 512], [128], [1, 4, 8, 16]))

    # Full cartesian product of (input_size, output_size, batch_size).
    conditions = list(itertools.product(input_token_sizes, output_token_sizes, batch_sizes))

    # Experiment driver
    def run_experiment():
        """Run every (input, output, batch) condition once and collect
        latency/throughput statistics from vLLM's per-request metrics.

        Returns a list of dicts, one per condition, suitable for
        pd.DataFrame(...).
        """
        records = []
        for input_size, output_size, batch_size in conditions:

            # Build a prompt of roughly `input_size` characters' worth of the
            # filler sentence, then duplicate it across the batch.
            # NOTE(review): this approximates token count with character
            # count — confirm that's acceptable for the benchmark.
            prompt = "This is a test input." * (input_size // len("This is a test input."))
            batch = [prompt] * batch_size

            # Greedy decoding with EOS ignored so each request emits exactly
            # `output_size` tokens.
            params = SamplingParams(max_tokens=output_size, ignore_eos=True, temperature=0)

            start = time.perf_counter()
            print(f"Running batch_size={batch_size} with input_size={input_size} with output_size={output_size}")

            outputs = llm.generate(batch, params)
            end = time.perf_counter()

            # Per-request metrics from vLLM: time-to-first-token, decode
            # tokens/sec, and total request latency.
            ttfts = [o.metrics.first_token_time - o.metrics.arrival_time for o in outputs]
            tps = [output_size / (o.metrics.finished_time - o.metrics.first_token_time) for o in outputs]
            latencies = [o.metrics.finished_time - o.metrics.arrival_time for o in outputs]

            records.append({
                "input_size": input_size,
                "output_size": output_size,
                "batch_size": batch_size,
                "ttft_mean": np.mean(ttfts),
                "ttft_min": np.min(ttfts),
                "ttft_max": np.max(ttfts),
                "single_sum": np.sum(latencies),
                "tps_mean": np.mean(tps),
                "tps_min": np.min(tps),
                "tps_max": np.max(tps),
                "total_time": end - start,
            })

        return records

    # Results go under a per-test-condition directory; the filename encodes
    # model and TP so runs don't clobber each other.
    # BUG FIX: the previous code os.chdir()'d into the directory but then
    # wrote via an absolute path anyway (the chdir was dead state mutation),
    # and `f"{path}/{model}..."` produced a doubled slash because `path`
    # already ended in '/'. Use os.path.join instead.
    out_dir = f'/mnt/workspace/guosj/vllmtest/results_{test_cond}/'
    os.makedirs(out_dir, exist_ok=True)

    experiment_results = run_experiment()

    df = pd.DataFrame(experiment_results)
    df.to_csv(os.path.join(out_dir, f"{model}_TP{TP}_results.csv"))
