import sys
import numpy as np
import matplotlib.pyplot as plt

def plot_latency_vs_time(num_samples=100):
    """Plot synthetic per-layer latencies (network, query, disk) and their sum over time.

    Args:
        num_samples: Number of time points to generate (default 100, matching
            the original hard-coded value).

    Side effects:
        Opens a matplotlib window via ``plt.show()``.
    """
    time = np.arange(num_samples)
    # Reuse the shared generator instead of duplicating the distribution
    # parameters (loc/scale) inline — keeps the two entry points consistent.
    network_latency, query_latency, disk_latency = generate_latency_data(
        num_samples=num_samples
    )

    # Total latency is the element-wise sum across all layers.
    total_latency = network_latency + query_latency + disk_latency

    # One line per layer plus the total, each with a distinct marker.
    plt.figure(figsize=(10, 6))
    plt.plot(time, network_latency, label='Network Latency', linestyle='-', marker='o')
    plt.plot(time, query_latency, label='Query Latency', linestyle='-', marker='s')
    plt.plot(time, disk_latency, label='Disk Latency', linestyle='-', marker='^')
    plt.plot(time, total_latency, label='Total Latency', linestyle='-', marker='x')
    plt.xlabel('Time')
    plt.ylabel('Latency')
    plt.title('Latency at Different Layers')
    plt.legend()
    plt.grid(True)
    plt.show()

def generate_latency_data(num_samples=1000, seed=None):
    """Generate synthetic latency samples for one benchmark run.

    Args:
        num_samples: Number of samples per layer (default 1000).
        seed: Optional seed for reproducible output. ``None`` (the default)
            preserves the original non-deterministic behavior.

    Returns:
        Tuple of three ``np.ndarray`` of length ``num_samples``:
        (network_latency, query_latency, disk_latency), drawn from normal
        distributions with means 10/5/15 and std devs 2/1/3 respectively.
    """
    rng = np.random.default_rng(seed)
    network_latency = rng.normal(loc=10, scale=2, size=num_samples)  # Network I/O latency
    query_latency = rng.normal(loc=5, scale=1, size=num_samples)     # Query execution latency
    disk_latency = rng.normal(loc=15, scale=3, size=num_samples)     # Disk I/O latency
    return network_latency, query_latency, disk_latency

# Generated by ChatGPT
def plot_different_run_in_separate_subplot():
    """Plot three independent benchmark runs, one subplot per run.

    Each subplot shows the network, query, and disk latency series for a
    freshly generated run of 1000 samples.

    Side effects:
        Opens a matplotlib window via ``plt.show()``.
    """
    num_runs = 3
    samples_per_run = 1000

    # One row of axes per benchmark run, stacked vertically.
    fig, axes = plt.subplots(num_runs, figsize=(10, 3 * num_runs))

    # (label, marker) per latency series, in the order the generator returns them.
    series_style = [
        ('Network Latency', 'o'),
        ('Query Latency', 's'),
        ('Disk Latency', '^'),
    ]

    for idx, ax in enumerate(axes):
        run_data = generate_latency_data(num_samples=samples_per_run)

        # Draw the three latency series for this run on its own axes.
        for samples, (label, marker) in zip(run_data, series_style):
            ax.plot(samples, label=label, linestyle='-', marker=marker)

        ax.set_xlabel('Sample')
        ax.set_ylabel('Latency')
        ax.set_title(f'Benchmark Run {idx+1}: Latency at Different Layers')
        ax.legend()
        ax.grid(True)

    # Avoid overlapping titles/labels between stacked subplots.
    plt.tight_layout()
    plt.show()

if __name__ == '__main__':
    # Guard against missing argument: the original `sys.argv[1]` raised an
    # IndexError when the script was run with no arguments.
    if len(sys.argv) < 2:
        print(f"Usage: {sys.argv[0]} <1|3>")
        sys.exit(1)
    choice = sys.argv[1]
    if choice == '1':
        plot_latency_vs_time()
    elif choice == '3':
        plot_different_run_in_separate_subplot()
    else:
        # Previously any other value was silently ignored; report it instead.
        print(f"Unknown option: {choice!r} (expected 1 or 3)")
        sys.exit(1)