'''
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import os
# import argparse # Not used in the plotting script itself

# Loading data from CSV files
data_files = [
    'results_mag_bert.csv',
    'results_mag_bert_ood.csv',
    'results_mult.csv',
    'results_mult_ood.csv',
    'results_text.csv',
    'results_text_ood.csv'
]

# Create dummy CSV files for testing
os.makedirs('results', exist_ok=True)
np.random.seed(42) # for reproducibility
metrics_cols = ['acc', 'weighted_f1', 'f1', 'oid_acc', 'eval_acc']
for file in data_files:
    file_path = os.path.join('results', file)
    # Generate some plausible random data (e.g., scores between 60 and 95)
    data_dict = {metric: np.random.uniform(60, 95, size=1) for metric in metrics_cols}
    # Add a bit of variation based on file name for visual distinctness
    if 'ood' in file:
        for metric in metrics_cols: data_dict[metric] -= np.random.uniform(5,10)
    if 'mag' in file:
        for metric in metrics_cols: data_dict[metric] += np.random.uniform(0,5)
    if 'mintrec' in file:
        for metric in metrics_cols: data_dict[metric] -= np.random.uniform(0,5)

    df_temp = pd.DataFrame(data_dict)
    df_temp.to_csv(file_path, index=False)
print("Dummy CSV files created in 'results' directory.")


# Reading and concatenating all CSV files
dfs = []
for file in data_files:
    file_path = os.path.join('results', file)
    df = pd.read_csv(file_path)
    # Clean up source_file names for legend
    clean_name = file.split('.')[0].replace('results_', '')
    if clean_name == 'mag_bert':
        clean_name = 'MAG-BERT'
    elif clean_name == 'mag_bert_ood':
        clean_name = 'MAG-BERT (OOD)'
    elif clean_name == 'mintrec_text':
        clean_name = 'MinTRec (Text)'
    elif clean_name == 'mult':
        clean_name = 'MULT'
    elif clean_name == 'mult_ood':
        clean_name = 'MULT (OOD)'
    elif clean_name == 'text_ood':
        clean_name = 'Text (OOD)'
    else:
        clean_name = clean_name.replace('_', ' ').title()
    df['source_file'] = clean_name
    dfs.append(df)
data = pd.concat(dfs, ignore_index=True)

# Selecting key metrics for visualization
metrics_to_plot = ['acc', 'weighted_f1', 'f1', 'oid_acc', 'eval_acc']
# More concise labels for the x-axis if needed, or use full for clarity
metric_x_labels = ['Accuracy', 'Weighted F1', 'Macro F1', 'OID Acc.', 'Eval Acc.']
# Ensure the order of labels matches metrics_to_plot
metric_map = dict(zip(metrics_to_plot, metric_x_labels))


# Melt the DataFrame to long format
data_melted = data.melt(id_vars=['source_file'],
                        value_vars=metrics_to_plot,
                        var_name='metric_type',
                        value_name='score')

# Map metric_type to the desired x-axis labels for correct ordering and display
data_melted['metric_type_labeled'] = data_melted['metric_type'].map(metric_map)
# Ensure the order of metric_type_labeled for plotting
data_melted['metric_type_labeled'] = pd.Categorical(data_melted['metric_type_labeled'], categories=metric_x_labels, ordered=True)


# Setting up the plot parameters for a CCF A paper
plt.rcParams.update({
    'font.family': 'Times New Roman', # Standard for many papers
    'font.size': 14,                # Slightly larger for readability
    'axes.titlesize': 16,
    'axes.labelsize': 14,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'legend.fontsize': 11,
    'figure.dpi': 300,
    'lines.linewidth': 2,           # Thicker lines
    'lines.markersize': 7           # Visible markers
})

fig, ax = plt.subplots(figsize=(10, 6)) # Adjusted figsize for a single plot

# Using a professional color palette
colors = sns.color_palette("Set2", len(data['source_file'].unique()))

# Plotting line chart
sns.lineplot(
    data=data_melted,
    x='metric_type_labeled',    # Use the ordered, labeled metrics
    y='score',
    hue='source_file',
    style='source_file', # Different marker styles for each line
    markers=True,        # Show markers
    dashes=False,        # Use solid lines, styles will differentiate
    palette=colors,
    ax=ax
)

ax.set_ylabel('Score (%)')
ax.set_xlabel('Evaluation Metric')

# --- MODIFICATION START ---
# Instead of ax.tick_params(axis='x', rotation=15, ha='right')
# Use plt.setp to set properties of existing tick labels
plt.setp(ax.get_xticklabels(), rotation=15, ha='right', rotation_mode='anchor')
# You can also use ax.tick_params for other things if needed, e.g., label size or padding
ax.tick_params(axis='x', pad=7) # Example: adjust padding if labels are too close
# --- MODIFICATION END ---

ax.grid(True, axis='y', linestyle=':', alpha=0.7)
ax.grid(True, axis='x', linestyle=':', alpha=0.3) # Light grid on x-axis too

min_score = data_melted['score'].min()
max_score = data_melted['score'].max()
ax.set_ylim(max(0, min_score - 10) , min(100, max_score + 10))

handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center', ncol=3, bbox_to_anchor=(0.5, 1.0), title='Experiments')
ax.get_legend().remove()

plt.tight_layout(rect=[0, 0, 1, 0.92 if len(data['source_file'].unique()) > 3 else 0.95])

output_filename = 'experiment_results_comparison_line.png'
plt.savefig(output_filename, dpi=300, bbox_inches='tight')
print(f"Plot saved as {output_filename}")
plt.show()
'''
import os
import sys

import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# import numpy as np # No longer needed as dummy data generation is removed
# import argparse # Not used in the plotting script itself

# Loading data from CSV files.
# Each model (MAG-BERT, MULT, text-only) has an in-distribution and an
# out-of-distribution (OOD) results file.
data_files = [
    f'results_{model}{suffix}.csv'
    for model in ('mag_bert', 'mult', 'text')
    for suffix in ('', '_ood')
]

print("Attempting to load data from existing CSV files in 'results' directory...")

# Reading and concatenating all CSV files
dfs = []
# Fail fast if the results directory is missing. sys.exit() is used instead of
# the site-provided exit() builtin: exit() is only injected by the `site`
# module and is not guaranteed to exist (e.g. under `python -S` or when the
# script is frozen into an executable).
results_dir = 'results'
if not os.path.isdir(results_dir):
    print(f"Error: Directory '{results_dir}' not found. Please ensure it exists and contains the required CSV files.")
    sys.exit(1)

# Map raw file stems (minus the 'results_' prefix) to presentation names for
# the plot legend. Any stem not listed falls back to a title-cased version.
_LEGEND_NAMES = {
    'mag_bert': 'MAG-BERT',
    'mag_bert_ood': 'MAG-BERT (OOD)',
    'mintrec_text': 'MinTRec (Text)',  # not produced by the current data_files
    'mult': 'MULT',
    'mult_ood': 'MULT (OOD)',
    'text': 'Text',
    'text_ood': 'Text (OOD)',
}

for file in data_files:
    file_path = os.path.join(results_dir, file)
    if not os.path.exists(file_path):
        # A missing file is non-fatal: warn once and plot whatever is
        # available. (The old code printed a fatal-sounding "Error ... ensure
        # all required CSV files are present" and then skipped anyway.)
        print(f"Warning: File '{file_path}' not found; skipping it.")
        continue

    df = pd.read_csv(file_path)
    # Derive the legend label from the file name, e.g.
    # 'results_mag_bert_ood.csv' -> 'mag_bert_ood' -> 'MAG-BERT (OOD)'.
    stem = os.path.splitext(file)[0].replace('results_', '')
    df['source_file'] = _LEGEND_NAMES.get(stem, stem.replace('_', ' ').title())
    dfs.append(df)

# Abort if every file was missing/skipped: pd.concat([]) would raise anyway.
# sys.exit() replaces the site-only exit() builtin (see directory check above
# for rationale); a nonzero status signals failure to callers.
if not dfs:
    print("No data loaded. Exiting. Please check your CSV files and their paths.")
    sys.exit(1)

data = pd.concat(dfs, ignore_index=True)

# --- MODIFICATION FOR eval_acc SCALING ---
# 'eval_acc' is reported on a 0-1 scale while the other metrics are 0-100, so
# convert it to a percentage before plotting.
# NOTE(review): this assumes every eval_acc value is in [0, 1]; values that
# are already percentages would be scaled incorrectly -- confirm against the
# CSV producers if the data source changes.
if 'eval_acc' not in data.columns:
    print("'eval_acc' column not found in the combined data. No scaling applied to it.")
else:
    data['eval_acc'] *= 100
    print("Scaled 'eval_acc' metric by 100.")
# --- END MODIFICATION ---


# Selecting key metrics for visualization, paired with their display labels.
# A single ordered dict is the source of truth: insertion order doubles as the
# x-axis ordering, and the two lists below are derived from it.
metric_map = {
    'acc': 'Accuracy',
    'weighted_f1': 'Weighted F1',
    'f1': 'Macro F1',
    'oid_acc': 'OID Acc.',
    'eval_acc': 'Eval Acc.',
}
metrics_to_plot = list(metric_map)
metric_x_labels = list(metric_map.values())


# Melt the DataFrame to long format.
# Metrics absent from EVERY loaded CSV cannot be passed to melt's value_vars,
# so restrict to the subset that actually appears in the combined frame.
# Metrics present in only SOME files yield NaN scores for the others and are
# dropped after melting.
existing_metrics_to_plot = [m for m in metrics_to_plot if m in data.columns]
missing_from_all = set(metrics_to_plot) - set(existing_metrics_to_plot)
if missing_from_all:
    print(f"Warning: The following metrics are not present in any loaded CSV and will be excluded from plot: {missing_from_all}")
    metrics_to_plot = existing_metrics_to_plot

data_melted = data.melt(id_vars=['source_file'],
                        value_vars=existing_metrics_to_plot,  # only metrics that exist
                        var_name='metric_type',
                        value_name='score')

# Map raw metric names onto display labels, then pin their plotting order via
# an ordered Categorical restricted to the labels actually present.
data_melted['metric_type_labeled'] = data_melted['metric_type'].map(metric_map)
actual_plotted_labels = [label for label in metric_x_labels
                         if label in data_melted['metric_type_labeled'].unique()]
data_melted['metric_type_labeled'] = pd.Categorical(
    data_melted['metric_type_labeled'],
    categories=actual_plotted_labels,
    ordered=True,
)

# Discard rows whose label did not map (metric missing from metric_map) or
# whose score is NaN (metric missing from that particular file).
data_melted.dropna(subset=['metric_type_labeled', 'score'], inplace=True)


# Setting up the plot parameters for a CCF A paper:
# serif font, 300 dpi, and sizes tuned for print readability.
_PAPER_STYLE = {
    'font.family': 'Times New Roman',
    'font.size': 14,
    'axes.titlesize': 16,
    'axes.labelsize': 14,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'legend.fontsize': 11,
    'figure.dpi': 300,
    'lines.linewidth': 2,
    'lines.markersize': 7,
}
plt.rcParams.update(_PAPER_STYLE)

fig, ax = plt.subplots(figsize=(10, 6))

# One palette entry per experiment; guard against a missing source column
# (should not happen if data loading succeeded).
if 'source_file' in data:
    num_unique_sources = data['source_file'].nunique()
else:
    num_unique_sources = 0
    print("Warning: 'source_file' column missing from data. Legend might be affected.")

# Ensure at least one color even when no sources were found.
colors = sns.color_palette("Set2", max(1, num_unique_sources))

# Plotting line chart: one solid line per experiment, with per-experiment
# marker styles (style=) doing the differentiation instead of dashes.
if data_melted.empty:
    print("No data to plot after processing and melting. Plot will be empty.")
else:
    sns.lineplot(
        data=data_melted,
        x='metric_type_labeled',
        y='score',
        hue='source_file',
        style='source_file',
        markers=True,
        dashes=False,
        palette=colors,
        ax=ax,
    )


ax.set_xlabel('Evaluation Metric')
ax.set_ylabel('Score (%)')

# Rotate the existing x tick labels in place. plt.setp is used because
# tick_params has no horizontal-alignment argument; extra padding keeps the
# rotated labels clear of the axis.
plt.setp(ax.get_xticklabels(), rotation=15, ha='right', rotation_mode='anchor')
ax.tick_params(axis='x', pad=7)

# Dotted grid: prominent on y (the score axis), faint on x.
ax.grid(True, axis='y', linestyle=':', alpha=0.7)
ax.grid(True, axis='x', linestyle=':', alpha=0.3)

# Pad the y range by 10 points on each side, clamped to [0, 100].
scores = data_melted['score']
if scores.empty:
    ax.set_ylim(0, 100)
else:
    ax.set_ylim(max(0, scores.min() - 10), min(100, scores.max() + 10))

# Promote the per-axes legend to a figure-level legend centered above the plot.
if num_unique_sources > 0 and not data_melted.empty:
    handles, labels = ax.get_legend_handles_labels()
    fig.legend(handles, labels,
               loc='upper center',
               ncol=min(3, num_unique_sources),
               bbox_to_anchor=(0.5, 1.0),
               title='Experiments')
    ax.get_legend().remove()
elif ax.get_legend() is not None:
    # A legend may exist even without hue data (e.g. created by seaborn);
    # remove it so the empty plot stays clean.
    ax.get_legend().remove()

# Leave headroom for the figure legend (more legend rows need more space).
plt.tight_layout(rect=[0, 0, 1, 0.92 if num_unique_sources > 3 else 0.95])

# Persist the figure at print resolution, then display it interactively.
output_filename = 'experiment_results_comparison_line.png'
fig.savefig(output_filename, dpi=300, bbox_inches='tight')
print(f"Plot saved as {output_filename}")
plt.show()