import matplotlib

# Select the non-interactive Agg backend BEFORE importing pyplot so the
# backend choice is guaranteed to apply (this script only writes a PNG;
# calling matplotlib.use() after pyplot is imported forces a backend switch
# and is fragile).
matplotlib.use('Agg')

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator

# Fixed model parameters
L_external = 1000              # external-loop iteration count
T_operation = 1000000          # per-operation compute cost, ns (increased workload)
T_sync_lock_base = 5000        # base sync/lock cost, ns (adjusted baseline)

# Sweeps over the two model constants
c_values = [0.7, 0.85, 1.0]    # computation-efficiency constants
d_values = [0.8, 1.2, 1.6]     # synchronization-overhead constants

# Thread counts to evaluate: 1 through 32 inclusive
P_range = np.arange(1, 33)

# Lay out a 2x2 grid of subplots for the four analyses below
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 12))
fig.suptitle('Revised Multithreading Performance Analysis', fontsize=14, fontweight='bold')

# Revised model: T_total = L_external × [T_operation/(c×P) + T_sync_lock_base×(d×P)]
# More realistic: computation shrinks as 1/P while sync overhead grows with P

# Plot 1: Effect of different c values (fixed d=1.2)
ax1 = axes[0, 0]
d_fixed = 1.2
for c in c_values:
    # Vectorized model over all thread counts. The old per-P loop carried a
    # dead `if P == 0: continue` guard -- P_range starts at 1, so it never
    # fired -- and the `P_range[:len(total_times)]` slice was redundant.
    T_compute = T_operation / (c * P_range)            # ns, shrinks ~1/P
    T_sync = T_sync_lock_base * (d_fixed * P_range)    # ns, grows ~P
    total_times = L_external * (T_compute + T_sync) / 1000000  # -> ms

    ax1.plot(P_range, total_times, marker='o', label=f'c={c}', linewidth=2)

    # Highlight the optimal (minimum-time) thread count for this c
    optimal_idx = np.argmin(total_times)
    ax1.scatter(P_range[optimal_idx], total_times[optimal_idx], s=100, zorder=5)

ax1.set_xlabel('Thread Count (P)')
ax1.set_ylabel('Total Execution Time (ms)')
ax1.set_title(f'Effect of Computation Efficiency c (d={d_fixed})')
ax1.legend()
ax1.grid(True, alpha=0.3)
ax1.xaxis.set_major_locator(MultipleLocator(4))

# Plot 2: Effect of different d values (fixed c=0.85)
ax2 = axes[0, 1]
c_fixed = 0.85
for d in d_values:
    # Vectorized model; P_range starts at 1, so the former `if P == 0`
    # guard was dead code and has been dropped.
    T_compute = T_operation / (c_fixed * P_range)   # ns, shrinks ~1/P
    T_sync = T_sync_lock_base * (d * P_range)       # ns, grows ~P
    total_times = L_external * (T_compute + T_sync) / 1000000  # -> ms

    ax2.plot(P_range, total_times, marker='s', label=f'd={d}', linewidth=2)

    # Highlight the optimal (minimum-time) thread count for this d
    optimal_idx = np.argmin(total_times)
    ax2.scatter(P_range[optimal_idx], total_times[optimal_idx], s=100, zorder=5)

ax2.set_xlabel('Thread Count (P)')
ax2.set_ylabel('Total Execution Time (ms)')
ax2.set_title(f'Effect of Synchronization Overhead d (c={c_fixed})')
ax2.legend()
ax2.grid(True, alpha=0.3)
ax2.xaxis.set_major_locator(MultipleLocator(4))

# Plot 3: Time composition analysis (c=0.85, d=1.2)
ax3 = axes[1, 0]
c_fixed = 0.85
d_fixed = 1.2

# Vectorized per-component costs in ms (P_range starts at 1, so the old
# `if P == 0: continue` guard was dead code).
compute_times = L_external * T_operation / (c_fixed * P_range) / 1000000    # shrinks ~1/P
sync_times = L_external * T_sync_lock_base * (d_fixed * P_range) / 1000000  # grows ~P
total_times = compute_times + sync_times

ax3.plot(P_range, compute_times, marker='^', label='Computation Time', linewidth=2, color='blue')
ax3.plot(P_range, sync_times, marker='v', label='Synchronization Time', linewidth=2, color='red')
ax3.plot(P_range, total_times, marker='o', label='Total Time', linewidth=3, color='green')

# Mark the optimal thread count (minimum of the total-time curve)
optimal_idx = np.argmin(total_times)
P_opt = P_range[optimal_idx]
T_opt = total_times[optimal_idx]
ax3.axvline(x=P_opt, color='gray', linestyle='--', alpha=0.7, label=f'Optimal Threads: {P_opt}')
ax3.scatter(P_opt, T_opt, s=100, color='darkgreen', zorder=5)

ax3.set_xlabel('Thread Count (P)')
ax3.set_ylabel('Time (ms)')
ax3.set_title(f'Time Composition Analysis (c={c_fixed}, d={d_fixed})')
ax3.legend()
ax3.grid(True, alpha=0.3)
ax3.xaxis.set_major_locator(MultipleLocator(4))

# Plot 4: Speedup analysis (c=0.85, d=1.2)
ax4 = axes[1, 1]
c_fixed = 0.85
d_fixed = 1.2

# Idealized single-thread baseline (no c/d penalties applied), in ms.
# NOTE(review): because the baseline omits c and d, the speedup at P=1 is
# not exactly 1 -- it measures gain over an *ideal* serial run. Confirm
# this asymmetry is intended.
single_thread_time = L_external * (T_operation + T_sync_lock_base) / 1000000  # ms

# Vectorized total time per thread count, in ms. P_range starts at 1, so
# the former `if P == 0` guard was dead code.
T_totals = L_external * (T_operation / (c_fixed * P_range)
                         + T_sync_lock_base * (d_fixed * P_range)) / 1000000
speedups = single_thread_time / T_totals
efficiencies = speedups / P_range * 100  # percent of perfect linear scaling

ax4.plot(P_range, speedups, marker='o', label='Speedup', linewidth=2, color='purple')
ax4.plot(P_range, efficiencies, marker='s', label='Efficiency (%)', linewidth=2, color='orange')

ax4.set_xlabel('Thread Count (P)')
ax4.set_ylabel('Speedup / Efficiency (%)')
ax4.set_title('Speedup and Parallel Efficiency')
ax4.legend()
ax4.grid(True, alpha=0.3)
ax4.xaxis.set_major_locator(MultipleLocator(4))

plt.tight_layout()
# The PNG written here is the script's real output artifact.
plt.savefig('total_time_and_thread_num.png', dpi=300, bbox_inches='tight')
# NOTE: the Agg backend is non-interactive, so show() is a no-op here; it is
# kept only for environments where an interactive backend is selected instead.
plt.show()

# Output analysis results
print("Revised Performance Model Analysis")
print("=" * 60)
print("Model: T_total = L_external × [T_operation/(c×P) + T_sync_lock_base×(d×P)]")
print("=" * 60)
print("Fixed parameters:")
print(f"L_external = {L_external}")
print(f"T_operation = {T_operation:,} ns")
print(f"T_sync_lock_base = {T_sync_lock_base:,} ns")
print(f"Single thread time = {single_thread_time:.1f} ms")
print()

print("Optimal Thread Count Analysis:")
print("-" * 50)


def _total_time_ms(c, d, P):
    """Return the modeled total execution time (ms) for thread count P."""
    return L_external * (T_operation / (c * P) + T_sync_lock_base * (d * P)) / 1000000


for c in c_values:
    for d in d_values:
        # min() returns the first minimizer, matching the original strict-<
        # scan; the old `if P == 0` guard was dead (P_range starts at 1).
        optimal_P = min(P_range, key=lambda P: _total_time_ms(c, d, P))
        min_time = _total_time_ms(c, d, optimal_P)
        print(f"c={c}, d={d}: Optimal P={optimal_P:2d}, Time={min_time:.1f} ms")

print("=" * 60)
print("Key Insights:")
print("1. Higher computation load (T_operation) allows more threads")
print("2. Higher sync overhead (d) reduces optimal thread count")
print("3. Better parallel efficiency (c) increases optimal thread count")
print("4. Real-world optimal thread count typically around 8-16 for balanced workloads")
