# Consistent per-model colors shared by every subplot

# A 3x3 grid gives nine slots; only the first len(tasks) are used
fig, axs = plt.subplots(3, 3, figsize=(15, 12))
fig.suptitle('Training Loss Comparisons Across Different Tasks', fontsize=16)

# Hide the unused trailing axes
for idx in range(len(tasks), 9):
    axs.flat[idx].set_visible(False)

# One fixed color per model label so legends agree across panels
model_labels = ['Qwen-7B-Chat', 'Llama2-7B', 'Llama3-8B', 'ChatGLM2-6B', 'ChatGLM3-6B', 'Baichuan2-7B', 'Yi-6B', 'Yi-34B']
colors = ['blue', 'red', 'green', 'purple', 'orange', 'cyan', 'magenta', 'yellow']

# Synthetic training-loss curves: one panel per task, one line per model
seed = 50
for idx, task in enumerate(tasks):
    ax = axs.flat[idx]
    np.random.seed(seed + idx)  # per-task seed keeps each panel reproducible
    steps = np.linspace(0, 10000, 20)

    # Decaying log-spaced curve per model, with model-dependent noise scale
    for j, (label, color) in enumerate(zip(model_labels, colors)):
        losses = np.logspace(1, 0.5 - j * 0.05, num=20) + np.random.randn(20) * (0.5 - j * 0.05)
        ax.plot(steps, losses, label=label, color=color)

    ax.set_xlabel('Step')
    ax.set_ylabel('Loss')
    ax.set_title(f'{task}')
    ax.legend(loc='upper right', fontsize=8)
    ax.grid(True)
    ax.set_xticks([0, 2000, 4000, 6000, 8000, 10000])

plt.tight_layout(rect=[0, 0, 1, 0.95])  # leave headroom for the suptitle
plt.show()