import itertools
import torch
import os
import csv
import argparse
import pandas as pd
from transformers import AutoTokenizer, AutoModelForCausalLM
from vllm import LLM, SamplingParams
# Run the experiment
if __name__ == "__main__":

    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Run vLLM Experiment with different Tensor Parallel Sizes.')
    parser.add_argument('--tp', type=int, default=1, help='Tensor parallel size.')
    parser.add_argument('--model', type=str, default='Llama-2-7b-hf', help='Model name as in local directory.')

    args = parser.parse_args()

    TP = args.tp
    model = args.model
    print(f"Running experiment for model={model} with TP={TP}")

    # GPU visibility order per tensor-parallel size (presumably tuned for the
    # node's NVLink/PCIe topology — confirm against the cluster layout).
    # BUG FIX: the previous code ran os.system('export CUDA_VISIBLE_DEVICES=...'),
    # which executes `export` in a child shell and never affects this Python
    # process, so the ordering was silently ignored. Assigning os.environ here,
    # before the LLM (and hence CUDA) is initialized, is the working equivalent.
    gpu_order_by_tp = {
        1: '0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15',
        2: '4,7,5,6,1,2,0,3,12,15,13,14,9,10,8,11',
        4: '4,5,7,6,0,1,3,2,9,8,10,11,13,12,14,15',
        8: '4,5,7,6,2,3,1,0,13,12,14,15,11,10,8,9',
    }
    if TP not in gpu_order_by_tp:
        raise ValueError("Unsupported TP value")
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_order_by_tp[TP]

    # Load model using vLLM from the local checkpoint directory.
    model_path = f'/mnt/workspace/ckpts/llama2-ckpts/{model}'
    # Use float16 to avoid bfloat16 compatibility issues on this hardware.
    llm = LLM(model=model_path, dtype="half", tensor_parallel_size=TP)

    # Experimental grid: every (sequence length, batch size) pairing.
    # Each condition uses seq_length both as the prompt size (in characters,
    # see run_experiment) and as the generation token budget.
    seq_lengths = [128, 256, 512, 1024, 2048, 4096]
    batch_sizes = [1, 2, 4, 16, 64, 128, 256, 512]

    conditions = list(itertools.product(seq_lengths, batch_sizes))

    # Benchmark driver: exercise llm.generate over every configured condition.
    def run_experiment():
        """Run one generation per (seq_length, batch_size) condition.

        Returns a list of dicts with keys: input_size, output_size,
        batch_size, first_token_latency, input_tokens_per_second,
        output_tokens_per_second, total_time.

        NOTE: timing comes from the first request of each batch only, and the
        prompt is sized by character count, not token count.
        """
        records = []
        for seq_length, batch_size in conditions:
            input_size = seq_length
            output_size = seq_length

            # Build a prompt of roughly input_size characters by repeating a
            # fixed sentence, then duplicate it across the batch.
            sentence = "This is a test input."
            prompt = sentence * (input_size // len(sentence))
            batch = [prompt] * batch_size

            # Greedy decoding, forced to emit exactly output_size tokens.
            params = SamplingParams(max_tokens=output_size, ignore_eos=True, temperature=0)
            outputs = llm.generate(batch, params)

            # Derive latency/throughput figures from vLLM's request metrics.
            m = outputs[0].metrics
            first_token_latency = m.first_token_time - m.first_scheduled_time
            input_tokens_per_second = input_size / (m.first_token_time - m.arrival_time)
            output_tokens_per_second = output_size / (m.finished_time - m.first_token_time)
            total_time = m.finished_time - m.arrival_time

            records.append({
                "input_size": input_size,
                "output_size": output_size,
                "batch_size": batch_size,
                "first_token_latency": first_token_latency,
                "input_tokens_per_second": input_tokens_per_second,
                "output_tokens_per_second": output_tokens_per_second,
                "total_time": total_time
            })

            # Print intermediate results
            print(f"Input Size: {input_size}, Output Size: {output_size}, Batch Size: {batch_size}\n"
                f"First Token Latency: {first_token_latency:.4f}s, Input Tokens per Second: {input_tokens_per_second:.2f}, Output Tokens per Second: {output_tokens_per_second:.2f}, Total Time: {total_time:.4f}s\n")

        return records

    # All output files go under the shared results directory.
    path = '/mnt/workspace/vllm_test/results/'
    os.makedirs(path, exist_ok=True)
    os.chdir(path)

    experiment_results = run_experiment()

    # Save raw results, one dict repr per line (kept so any existing tooling
    # that reads the .txt file continues to work).
    with open(f"{model}_TP{TP}_experiment_results.txt", "w") as f:
        for result in experiment_results:
            f.write(str(result) + "\n")

    # Write the CSV directly from the in-memory results. The previous code
    # re-read the .txt file and eval()'d each line — eval on file contents is
    # fragile and a code-injection risk, and the round-trip was unnecessary.
    fieldnames = ["input_size", "output_size", "batch_size", "first_token_latency",
                  "input_tokens_per_second", "output_tokens_per_second", "total_time"]
    with open(f"{model}_TP{TP}_experiment_results.csv", "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(experiment_results)