import torch
import triton
import triton.language as tl
import matplotlib.pyplot as plt
import numpy as np
import csv
from datetime import datetime

# Define the kernel for abs operation
@triton.jit
def abs_kernel(
    x_ptr,
    output_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    """Elementwise absolute value: output[i] = |x[i]| for i < n_elements.

    Each program instance processes one contiguous block of BLOCK_SIZE
    elements, so the launch grid must contain ceil(n_elements / BLOCK_SIZE)
    programs (see abs_triton below).
    """
    # Index of this program instance along the 1-D launch grid.
    pid = tl.program_id(axis=0)
    # First flat element index this program is responsible for.
    block_start = pid * BLOCK_SIZE
    # Per-lane global offsets into the flat input/output buffers.
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    # Create a mask to handle the case where n_elements is not a multiple of BLOCK_SIZE
    mask = offsets < n_elements
    # Load data (lanes past the end are masked off and not read).
    x = tl.load(x_ptr + offsets, mask=mask)
    # Compute abs
    output = tl.abs(x)
    # Store the result (masked lanes are not written).
    tl.store(output_ptr + offsets, output, mask=mask)

# Function to run the kernel
def abs_triton(x, block_size=1024):
    """Elementwise |x| computed with the Triton kernel above.

    Args:
        x: CUDA tensor, treated as a flat buffer of ``x.numel()`` elements.
           NOTE(review): the kernel indexes the buffer contiguously, so this
           presumably requires ``x`` to be contiguous — confirm callers.
        block_size: elements handled per Triton program (default 1024,
           matching the original hard-coded value).

    Returns:
        A new tensor of the same shape/dtype holding abs(x).
    """
    output = torch.empty_like(x)
    n_elements = x.numel()
    # One program per block. Using a single ``block_size`` variable keeps the
    # grid computation and the kernel's BLOCK_SIZE in sync (the original
    # hard-coded 1024 in two separate places, which could silently diverge).
    grid = (triton.cdiv(n_elements, block_size),)
    abs_kernel[grid](x, output, n_elements, BLOCK_SIZE=block_size)
    return output

# Function to run PyTorch's abs
def abs_torch(x):
    """Reference implementation: elementwise absolute value via PyTorch."""
    return x.abs()

# Benchmark sizes from 1K to 256M elements, increasing by powers of 2.
sizes = [2**i for i in range(10, 29)]  # 2**10 = 1K ... 2**28 = 256M

# Create benchmark functions
def benchmark_triton(size):
    """Return a zero-arg callable running abs_triton on a fresh random CUDA tensor."""
    data = torch.randn(size, device='cuda', dtype=torch.float32)

    def run():
        return abs_triton(data)

    return run

def benchmark_torch(size):
    """Return a zero-arg callable running abs_torch on a fresh random CUDA tensor."""
    data = torch.randn(size, device='cuda', dtype=torch.float32)

    def run():
        return abs_torch(data)

    return run

# Results storage (times reported by do_bench, in milliseconds).
triton_times = []
torch_times = []

# Run benchmarks for each size
for size in sizes:
    print(f"Benchmarking size: {size}")

    # Triton benchmark
    triton_fn = benchmark_triton(size)
    triton_time = triton.testing.do_bench(triton_fn, warmup=25, rep=100)
    triton_times.append(triton_time)

    # PyTorch benchmark
    torch_fn = benchmark_torch(size)
    torch_time = triton.testing.do_bench(torch_fn, warmup=25, rep=100)
    torch_times.append(torch_time)

    # Free memory to avoid OOM for large sizes.  The benchmark closures keep
    # the input tensors alive, so drop them first — otherwise
    # torch.cuda.empty_cache() cannot release those allocations.
    del triton_fn, torch_fn
    if size >= 2**26:  # 64M or larger
        torch.cuda.empty_cache()

# Calculate bandwidth in GB/s.  Each element is read once and written once
# (2 transfers per element); do_bench reports milliseconds (Triton's default),
# so bytes / (ms * 1e6) == bytes / (s * 1e9) == GB/s.
dtype_size = 4  # float32 is 4 bytes


def _gbps(n_elems, ms):
    # Effective memory bandwidth for one measurement.
    return (2 * n_elems * dtype_size) / (ms * 1e6)


triton_bandwidth = [_gbps(n, ms) for n, ms in zip(sizes, triton_times)]
torch_bandwidth = [_gbps(n, ms) for n, ms in zip(sizes, torch_times)]

# Convert element counts to compact labels: "K" below 1 Mi elements, "M" above.
readable_sizes = [
    f"{size / (1024 * 1024):.0f}M" if size >= 1024 * 1024 else f"{size / 1024:.0f}K"
    for size in sizes
]

# Plot both bandwidth curves against the human-readable size labels.
plt.figure(figsize=(12, 6))
for series_label, series in (('Triton', triton_bandwidth), ('PyTorch', torch_bandwidth)):
    plt.plot(readable_sizes, series, 'o-', label=series_label)
plt.xlabel('Tensor Size (elements)')
plt.ylabel('Bandwidth (GB/s)')
plt.title('Abs Operation Bandwidth Comparison')
plt.legend()
plt.grid(True)
plt.xticks(rotation=45)
plt.tight_layout()

# Save the plot, timestamped so repeated runs do not overwrite each other.
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
plt.savefig(f'abs_benchmark_{timestamp}.png')

# Save the results to a CSV file.
# do_bench reports times in milliseconds, so the time columns are labeled
# "(ms)" — the original header's "(s)" mislabeled the units.
with open(f'abs_benchmark_{timestamp}.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['Size', 'Size (readable)', 'Triton Time (ms)', 'PyTorch Time (ms)',
                     'Triton Bandwidth (GB/s)', 'PyTorch Bandwidth (GB/s)'])
    # zip keeps the per-size rows aligned without manual indexing.
    for row in zip(sizes, readable_sizes, triton_times, torch_times,
                   triton_bandwidth, torch_bandwidth):
        writer.writerow(row)

print(f"Benchmark completed. Results saved to abs_benchmark_{timestamp}.csv and abs_benchmark_{timestamp}.png")

# Print an aligned per-size summary table to stdout.
print("\nSummary:")
print(f"{'Size':<10} {'Triton (GB/s)':<15} {'PyTorch (GB/s)':<15} {'Speedup':<10}")
print("-" * 50)
for size_label, t_ms, p_ms, t_bw, p_bw in zip(
        readable_sizes, triton_times, torch_times, triton_bandwidth, torch_bandwidth):
    # Speedup of Triton relative to PyTorch: > 1 means Triton was faster.
    speedup = p_ms / t_ms
    print(f"{size_label:<10} {t_bw:<15.2f} {p_bw:<15.2f} {speedup:<10.2f}x")