import itertools
import torch
import os
import csv
from modelscope import AutoTokenizer, AutoModelForCausalLM
from vllm import LLM, SamplingParams

# Set the GPU affinity
# NOTE(review): if re-enabled, these os.system('export ...') calls would be
# no-ops — 'export' runs in a throwaway subshell and does not affect this
# process's environment. Set os.environ['CUDA_VISIBLE_DEVICES'] = '...'
# BEFORE any CUDA/vLLM initialization instead.
# os.system('export CUDA_VISIBLE_DEVICES=4,7,5,6,1,2,0,3,12,15,13,14,9,10,8,11') # TP = 2
# os.system('export CUDA_VISIBLE_DEVICES=4,5,7,6,0,1,3,2,9,8,10,11,13,12,14,15') # TP = 4
# os.system('export CUDA_VISIBLE_DEVICES=4,5,7,6,2,3,1,0,13,12,14,15,11,10,8,9') # TP = 8
# os.system('export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15') # TP = 1 or 16

# Load model using vLLM from a local checkpoint directory (module-level side
# effect: the engine is initialized at import time).
model_path = '/mnt/workspace/qwen-ckpts'
llm = LLM(model=model_path, dtype="half", tensor_parallel_size=1)  # Use float16 to avoid bfloat16 compatibility issue


# Define experimental conditions
input_token_sizes = [1000, 2000]
output_token_sizes = [500, 1000]
batch_sizes = [4, 16, 32, 64, 128]
# Full sweep grid: every (input_size, output_size, batch_size) combination,
# with batch_size varying fastest (same ordering as itertools.product).
conditions = [
    (in_tokens, out_tokens, batch)
    for in_tokens in input_token_sizes
    for out_tokens in output_token_sizes
    for batch in batch_sizes
]

# Experiment script
def run_experiment(engine=None, conds=None, make_sampling_params=None):
    """Run the latency/throughput sweep and return one result dict per condition.

    Args:
        engine: object exposing ``generate(prompts, sampling_params)`` (e.g. a
            vLLM ``LLM``). Defaults to the module-level ``llm`` so existing
            callers (``run_experiment()``) behave exactly as before.
        conds: iterable of ``(input_size, output_size, batch_size)`` tuples.
            Defaults to the module-level ``conditions``.
        make_sampling_params: callable ``max_tokens -> sampling params`` passed
            to ``engine.generate``. Defaults to ``SamplingParams``; injectable
            so the sweep can be exercised without vLLM installed.

    Returns:
        list[dict]: per-condition metrics with keys ``input_size``,
        ``output_size``, ``batch_size``, ``first_token_latency``,
        ``input_tokens_per_second``, ``output_tokens_per_second``,
        ``total_time``.
    """
    if engine is None:
        engine = llm
    if conds is None:
        conds = conditions
    if make_sampling_params is None:
        def make_sampling_params(max_tokens):
            return SamplingParams(max_tokens=max_tokens)

    results = []
    for input_size, output_size, batch_size in conds:
        # Prepare input. NOTE: this repeats a fixed sentence until roughly
        # input_size *characters* (not tokens) are produced — only a rough
        # proxy for the intended token count.
        unit = "This is a test input."
        input_text = unit * (input_size // len(unit))
        prompts = [input_text] * batch_size

        # Generate output and collect vLLM's per-request timing metrics.
        sampling_params = make_sampling_params(output_size)
        output = engine.generate(prompts, sampling_params)

        # Metrics come from the FIRST request in the batch only, so the
        # throughput figures below are per-request, not aggregate across
        # the batch.
        metrics = output[0].metrics
        # Queue-to-first-token latency (scheduling delay excluded).
        first_token_latency = metrics.first_token_time - metrics.first_scheduled_time
        # Prefill throughput: nominal input tokens / arrival-to-first-token.
        input_tokens_per_second = input_size / (metrics.first_token_time - metrics.arrival_time)
        # Decode throughput: requested output tokens / first-token-to-finish.
        output_tokens_per_second = output_size / (metrics.finished_time - metrics.first_token_time)
        total_time = metrics.finished_time - metrics.arrival_time

        results.append({
            "input_size": input_size,
            "output_size": output_size,
            "batch_size": batch_size,
            "first_token_latency": first_token_latency,
            "input_tokens_per_second": input_tokens_per_second,
            "output_tokens_per_second": output_tokens_per_second,
            "total_time": total_time
        })

        # Print intermediate results
        print(f"Input Size: {input_size}, Output Size: {output_size}, Batch Size: {batch_size}\n"
              f"First Token Latency: {first_token_latency:.4f}s, Input Tokens per Second: {input_tokens_per_second:.2f}, Output Tokens per Second: {output_tokens_per_second:.2f}, Total Time: {total_time:.4f}s\n")

    return results

# Run the experiment
if __name__ == "__main__":
    experiment_results = run_experiment()

    # Keep the human-readable text dump for quick inspection.
    with open("experiment_results.txt", "w") as f:
        for result in experiment_results:
            f.write(str(result) + "\n")

    # Write the CSV directly from the in-memory results. The previous code
    # re-read experiment_results.txt and parsed each line with eval(), which
    # is both unnecessary (the data is already in memory) and unsafe.
    fieldnames = ["input_size", "output_size", "batch_size", "first_token_latency", "input_tokens_per_second", "output_tokens_per_second", "total_time"]
    with open("experiment_results.csv", "w", newline="") as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(experiment_results)