import os

import click
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from wordcloud import WordCloud
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LinearSegmentedColormap

# TTS providers under evaluation.
providers = ["Hume TTS", "OpenAI TTS", "DeepSpeech"]

# Mapping from internal chart identifiers to their Chinese display names,
# used when composing output filenames.
chart_names = {
    "radar_chart": "雷达图",
    "combined_radar": "组合雷达图",
    "response_speed": "响应速度图",
    "wer_matrix": "词错误率矩阵热图",
    "error_wordcloud": "错误类型词云",
    "dtw_boxplot": "DTW距离箱线图",
    "f0_contour": "基频轨迹图",
    "speech_rate_contour": "语速曲线图",
    "energy_contour": "能量包络图",
    "prosody_3d": "韵律三维分析图",
    "prosody_radar": "韵律雷达图",
}


def format_filename(model, chart_type):
    """Build an output filename following the ``{model}-{name}.png`` convention.

    ``model`` may be ``None`` or empty, in which case the generic prefix
    ``all_models`` is used. ``chart_type`` is translated to its Chinese
    display name when known; otherwise it is used verbatim.
    """
    if model:
        prefix = model.replace(" ", "_").lower()
    else:
        prefix = "all_models"
    suffix = chart_names.get(chart_type, chart_type)
    return f"{prefix}-{suffix}.png"


def _minmax_ratio(series):
    """Return ``(series - min) / (max - min)`` for a numeric Series.

    Guards the degenerate case where every provider has the same average:
    the original inline formula divided by zero and produced NaN scores.
    When max == min every value is treated as the minimum (ratio 0).
    """
    lo = series.min()
    hi = series.max()
    if hi == lo:
        return series * 0.0
    return (series - lo) / (hi - lo)


def _scores_by_provider(score_series):
    """Map every known provider to its score rounded to 1 decimal (0 when absent)."""
    return {
        provider: round(score_series[provider], 1) if provider in score_series.index else 0
        for provider in providers
    }


def calculate_scores_from_excel(file_path):
    """
    Calculate normalized scores (1-10) from raw data in an Excel workbook.

    Parameters
    ----------
    file_path : str
        Path to the Excel file produced by the TTS evaluation pipeline.

    Returns
    -------
    dict
        Maps metric name -> {provider: score}. Providers missing from a
        sheet receive a score of 0 for that metric. The WER, DTW, and
        prosody metrics are only included when their sheets are present.
    """
    scores = {}

    # 1. Response time: lower is better, so the min-max ratio is inverted
    # onto a 1-10 scale (fastest provider -> 10).
    response_df = pd.read_excel(file_path, sheet_name="Response Time Data")
    print("Providers in Response Time Data:", response_df["Provider"].unique())
    avg_response = response_df.groupby("Provider")["Response Time (ms)"].mean()
    response_score = 10 - 9 * _minmax_ratio(avg_response)
    scores["Response Speed"] = _scores_by_provider(response_score)

    # 2. Language naturalness: 1-5 ratings doubled onto the 1-10 scale.
    language_df = pd.read_excel(file_path, sheet_name="Language Naturalness Data")
    avg_language = language_df.groupby("Provider")["Language Naturalness (1-5)"].mean()
    scores["Language Naturalness"] = _scores_by_provider(avg_language * 2)

    # 3. Tone naturalness: same 1-5 -> 1-10 conversion.
    tone_df = pd.read_excel(file_path, sheet_name="Tone Naturalness Data")
    avg_tone = tone_df.groupby("Provider")["Tone Naturalness (1-5)"].mean()
    scores["Tone Naturalness"] = _scores_by_provider(avg_tone * 2)

    # 4. Clarity is already rated on a 1-10 scale; use the mean directly.
    clarity_df = pd.read_excel(file_path, sheet_name="Clarity Data")
    avg_clarity = clarity_df.groupby("Provider")["Clarity Score (1-10)"].mean()
    scores["Clarity"] = _scores_by_provider(avg_clarity)

    # 5. Emotion: weighted blend of recognition percentage (60%) and
    # intensity (40%), both rescaled to 0-10 first.
    emotion_df = pd.read_excel(file_path, sheet_name="Emotion Data")
    avg_recog = emotion_df.groupby("Provider")["Emotion Recognition (%)"].mean()
    avg_intensity = emotion_df.groupby("Provider")["Emotion Intensity (1-5)"].mean()
    emotion_score = (avg_recog / 10) * 0.6 + (avg_intensity * 2) * 0.4
    scores["Emotional Expression"] = _scores_by_provider(emotion_score)

    # 6. Optional metrics: only computed when their sheets exist.
    available_sheets = pd.ExcelFile(file_path).sheet_names

    # WER: lower error rate is better -> inverted min-max scaling.
    if "WER Detailed Data" in available_sheets:
        wer_df = pd.read_excel(file_path, sheet_name="WER Detailed Data")
        avg_wer = wer_df.groupby("Provider")["WER (%)"].mean()
        scores["WER Score"] = _scores_by_provider(10 - 9 * _minmax_ratio(avg_wer))

    # DTW distance: lower distance = more consistent -> inverted scaling.
    if "DTW Distance Data" in available_sheets:
        dtw_df = pd.read_excel(file_path, sheet_name="DTW Distance Data")
        avg_dtw = dtw_df.groupby("Provider")["DTW Distance"].mean()
        scores["Consistency (DTW)"] = _scores_by_provider(10 - 9 * _minmax_ratio(avg_dtw))

    # Prosody: higher variation = more expressive; equal-weight average of
    # the three per-feature scores, each min-max scaled onto 1-10.
    if "Prosody Summary" in available_sheets:
        prosody_df = pd.read_excel(file_path, sheet_name="Prosody Summary")

        avg_f0_std = prosody_df.groupby("Provider")["F0 Std (Hz)"].mean()
        f0_score = 1 + 9 * _minmax_ratio(avg_f0_std)

        avg_rate_std = prosody_df.groupby("Provider")["Speech Rate Std (syl/s)"].mean()
        rate_score = 1 + 9 * _minmax_ratio(avg_rate_std)

        avg_energy_std = prosody_df.groupby("Provider")["Energy Std (0-1)"].mean()
        energy_score = 1 + 9 * _minmax_ratio(avg_energy_std)

        prosody_score = (f0_score + rate_score + energy_score) / 3
        scores["Prosody Expressiveness"] = _scores_by_provider(prosody_score)

    return scores


def create_radar_chart(scores, output_file=None, selected_provider=None):
    """Render a radar chart of the core metrics and save it as a PNG.

    All providers are plotted by default; pass ``selected_provider`` to
    draw a single provider's profile. Returns the (already closed)
    matplotlib figure.
    """
    # Derive a default filename when the caller did not supply one.
    if output_file is None:
        output_file = format_filename(selected_provider, "radar_chart")

    # The radar chart shows only the core metrics; WER / DTW / prosody
    # have their own dedicated visualizations.
    excluded = {"WER Score", "Consistency (DTW)", "Prosody Expressiveness"}
    categories = [name for name in scores if name not in excluded]
    n_axes = len(categories)

    # One spoke per category, evenly spaced; repeat the first angle so the
    # plotted polygon closes on itself.
    spokes = [i / float(n_axes) * 2 * np.pi for i in range(n_axes)]
    spokes.append(spokes[0])

    fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))

    # Start at 12 o'clock and run clockwise.
    ax.set_theta_offset(np.pi / 2)
    ax.set_theta_direction(-1)

    plt.xticks(spokes[:-1], categories, size=14)

    ax.set_rlabel_position(0)
    plt.yticks([2, 4, 6, 8, 10], ["2", "4", "6", "8", "10"], color="grey", size=12)
    plt.ylim(0, 10)

    colors = {
        "Hume TTS": "#FF6B6B",  # Red
        "OpenAI TTS": "#4ECDC4",  # Teal
        "DeepSpeech": "#FFD166",  # Yellow
    }

    plotted = [selected_provider] if selected_provider else providers

    for name in plotted:
        series = [scores[category][name] for category in categories]
        series.append(series[0])  # close the polygon
        ax.plot(spokes, series, linewidth=2, linestyle="solid", color=colors[name])
        ax.fill(spokes, series, color=colors[name], alpha=0.25)

    # Legend swatches matching each provider's fill style.
    handles = [
        mpatches.Patch(
            facecolor=colors[name],
            alpha=0.25,
            edgecolor=colors[name],
            label=name,
        )
        for name in plotted
    ]
    ax.legend(handles=handles, loc="upper right", fontsize=12)

    chart_title = f"{selected_provider} Performance" if selected_provider else "TTS Provider Comparison"
    plt.title(chart_title, size=20, y=1.1)

    plt.figtext(
        0.5,
        0.01,
        "Each axis represents a performance metric (higher is better)",
        ha="center",
        fontsize=12,
    )

    plt.tight_layout()
    plt.savefig(output_file, dpi=300, bbox_inches="tight")
    print(f"Radar chart saved to {output_file}")

    # Free the figure's memory before returning it.
    plt.close(fig)
    return fig


def create_response_speed_bar_chart(scores, output_file=None):
    """Draw a bar chart of the Response Speed scores and save it as a PNG.

    Returns the (already closed) matplotlib figure.
    """
    if output_file is None:
        output_file = format_filename(None, "response_speed")

    speed_by_provider = scores["Response Speed"]

    fig, ax = plt.subplots(figsize=(8, 6))

    colors = {
        "Hume TTS": "#FF6B6B",  # Red
        "OpenAI TTS": "#4ECDC4",  # Teal
        "DeepSpeech": "#FFD166",  # Yellow
    }

    names = list(speed_by_provider.keys())
    values = list(speed_by_provider.values())
    bars = ax.bar(names, values, color=[colors[name] for name in names])

    # Label each bar with its numeric score just above the top edge.
    for rect in bars:
        top = rect.get_height()
        ax.text(
            rect.get_x() + rect.get_width() / 2.,
            top + 0.1,
            f"{top:.1f}",
            ha='center',
            va='bottom',
            fontsize=12,
        )

    ax.set_ylim(0, 11)  # headroom above the 10-point scale for the labels
    ax.set_ylabel('Score (higher is better)', fontsize=14)
    ax.set_title('Response Speed Comparison', fontsize=18)
    ax.grid(axis='y', linestyle='--', alpha=0.7)

    plt.tight_layout()
    plt.savefig(output_file, dpi=300, bbox_inches="tight")
    print(f"Response Speed chart saved to {output_file}")

    # Free the figure's memory before returning it.
    plt.close(fig)
    return fig


def create_wer_matrix_heatmap(data_file, output_file=None):
    """Draw the pairwise WER-difference matrix as a heatmap and save it.

    Reads the "WER Comparison Matrix" sheet from ``data_file``; returns
    the seaborn heatmap axes (figure already closed).
    """
    if output_file is None:
        output_file = format_filename(None, "wer_matrix")

    matrix_df = pd.read_excel(data_file, sheet_name="WER Comparison Matrix")

    # Reshape long-format rows into a provider-by-provider grid.
    grid = matrix_df.pivot(index="Provider 1", columns="Provider 2", values="Average WER Difference (%)")

    plt.figure(figsize=(10, 8))

    ax = sns.heatmap(grid, annot=True, cmap="YlOrRd", fmt=".2f", linewidths=.5)

    plt.title("WER Difference Between Providers (%)", fontsize=16)
    plt.xticks(rotation=45, ha='right')
    plt.yticks(rotation=0)

    plt.tight_layout()
    plt.savefig(output_file, dpi=300, bbox_inches="tight")
    print(f"WER matrix heatmap saved to {output_file}")

    # Free the figure's memory before returning the axes.
    plt.close()
    return ax


def create_error_type_wordclouds(data_file, output_dir):
    """
    Create word clouds of common error types, one PNG per provider.

    Parameters
    ----------
    data_file : str
        Path to the evaluation workbook. Kept for interface compatibility;
        see note below.
    output_dir : str
        Directory the word cloud images are written into.

    NOTE(review): an earlier version read the "Error Type Frequencies"
    sheet into a DataFrame and then never used it; that dead read has been
    removed. The curated frequency tables below are used for demonstration
    instead of workbook data.
    """
    # Expanded error types with more specific technical vocabulary
    expanded_errors = {
        "Hume TTS": {
            "Homophone": 18.5, "Proper Noun": 15.2, "Technical Term": 12.8, "Domain-Specific": 10.5,
            "Medical Terminology": 8.7, "Scientific Jargon": 7.9, "Compound Word": 6.3, "Acronym": 5.8,
            "Foreign Name": 4.2, "Academic Phrase": 3.5, "Mathematical Symbol": 2.8, "Phonetically Complex": 2.1,
            "Polysyllabic": 1.7
        },
        "OpenAI TTS": {
            "Proper Noun": 16.4, "Homophone": 14.2, "Technical Jargon": 12.1, "Non-Latin Word": 10.8,
            "Programming Syntax": 8.5, "Chemical Formula": 7.3, "Legal Term": 6.5, "Financial Acronym": 5.9,
            "Biological Term": 4.7, "Engineering Abbreviation": 3.8, "Diacritical Mark": 3.2, 
            "Compound Technical": 2.4, "Phonetically Ambiguous": 2.1, "Rare Conjugation": 1.6
        },
        "DeepSpeech": {
            "Word Substitution": 22.5, "Boundary Error": 18.2, "Phonetic Confusion": 15.7, "Number Format": 12.3,
            "Speech Disfluency": 9.8, "Hesitation Artifact": 8.1, "Compound Splitting": 7.5, "Named Entity": 6.8,
            "Grammatical Inflection": 5.4, "Function Word": 4.9, "Context Disambiguation": 4.2, 
            "Rapid Speech": 3.5, "Intonation Pattern": 2.8, "Utterance Boundary": 2.1
        }
    }

    # Matplotlib colormap name used for each provider's cloud.
    colors = {
        "Hume TTS": "Reds",
        "OpenAI TTS": "Blues",
        "DeepSpeech": "Oranges"
    }

    for provider in providers:
        error_dict = expanded_errors.get(provider, {})

        if not error_dict:
            print(f"No error type data available for {provider}")
            continue

        # Generate output filename
        output_file = os.path.join(output_dir, format_filename(provider, "error_wordcloud"))

        # Render the frequency table as a word cloud; random_state is fixed
        # so layouts are reproducible across runs.
        wc = WordCloud(
            background_color="white",
            max_words=100,
            width=800,
            height=400,
            colormap=colors.get(provider, "viridis"),
            prefer_horizontal=0.9,
            min_font_size=8,
            max_font_size=60,
            random_state=42
        ).generate_from_frequencies(error_dict)

        # Display the word cloud
        plt.figure(figsize=(10, 6))
        plt.imshow(wc, interpolation="bilinear")
        plt.axis("off")
        plt.title(f"Common Error Types: {provider}", fontsize=16)

        # Save the figure
        plt.savefig(output_file, dpi=300, bbox_inches="tight")
        print(f"Error type word cloud for {provider} saved to {output_file}")

        # Close the figure to free memory
        plt.close()

def create_dtw_boxplot(data_file, output_file=None):
    """Draw box plots (with overlaid points) of DTW distances per provider.

    Reads the "DTW Distance Data" sheet from ``data_file``; returns the
    seaborn boxplot axes (figure already closed).
    """
    if output_file is None:
        output_file = format_filename(None, "dtw_boxplot")

    dtw_df = pd.read_excel(data_file, sheet_name="DTW Distance Data")

    plt.figure(figsize=(12, 8))

    colors = {
        "Hume TTS": "#FF6B6B",  # Red
        "OpenAI TTS": "#4ECDC4",  # Teal
        "DeepSpeech": "#FFD166",  # Yellow
    }

    palette = {name: colors[name] for name in providers}
    ax = sns.boxplot(
        x="Provider",
        y="DTW Distance",
        data=dtw_df,
        palette=palette,
    )

    # Overlay the raw observations as a swarm of points.
    sns.swarmplot(
        x="Provider",
        y="DTW Distance",
        data=dtw_df,
        color="black",
        alpha=0.5,
        size=4,
    )

    plt.title("DTW Distance Distribution (Lower = More Consistent)", fontsize=16)
    plt.xlabel("Provider", fontsize=14)
    plt.ylabel("DTW Distance", fontsize=14)
    plt.grid(axis='y', linestyle='--', alpha=0.7)

    # Annotate each box with that provider's mean distance.
    means = dtw_df.groupby("Provider")["DTW Distance"].mean()
    for position, name in enumerate(providers):
        if name in means:
            plt.text(
                position,
                means[name] + 2,  # float the label just above the mean
                f"Mean: {means[name]:.2f}",
                ha='center',
                fontsize=10,
                bbox=dict(facecolor='white', alpha=0.8, boxstyle='round,pad=0.5'),
            )

    plt.tight_layout()
    plt.savefig(output_file, dpi=300, bbox_inches="tight")
    print(f"DTW distance box plot saved to {output_file}")

    # Free the figure's memory before returning the axes.
    plt.close()
    return ax


# Shared provider -> plot color mapping used by all prosody charts.
_PROSODY_COLORS = {
    "Hume TTS": "#FF6B6B",  # Red
    "OpenAI TTS": "#4ECDC4",  # Teal
    "DeepSpeech": "#FFD166",  # Yellow
}


def _plot_prosody_contours(utterance_data, example_utterance, output_dir):
    """Line plot of each prosody feature (F0, rate, energy) over time."""
    # Feature column -> chart identifier used for the output filename.
    features = {
        "F0 (Hz)": "f0_contour",
        "Speech Rate (syl/s)": "speech_rate_contour",
        "Energy (0-1)": "energy_contour"
    }

    for feature, chart_type in features.items():
        output_file = os.path.join(output_dir, format_filename(None, chart_type))

        plt.figure(figsize=(12, 6))

        for provider in providers:
            provider_data = utterance_data[utterance_data["Provider"] == provider]
            if not provider_data.empty:
                plt.plot(
                    provider_data["Timepoint"],
                    provider_data[feature],
                    label=provider,
                    color=_PROSODY_COLORS[provider],
                    linewidth=2
                )

        # Strip the unit suffix, e.g. "F0 (Hz)" -> "F0", for the title.
        feature_name = feature.split("(")[0].strip()
        plt.title(f"{feature_name} Contour - {example_utterance}", fontsize=16)
        plt.xlabel("Time Point", fontsize=14)
        plt.ylabel(feature, fontsize=14)
        plt.grid(True, alpha=0.3)
        plt.legend(fontsize=12)

        plt.savefig(output_file, dpi=300, bbox_inches="tight")
        print(f"{feature_name} contour plot saved to {output_file}")

        # Close the figure to free memory
        plt.close()


def _plot_prosody_3d(utterance_data, example_utterance, output_dir):
    """3D trajectory of (F0, speech rate, energy) for each provider."""
    output_file = os.path.join(output_dir, format_filename(None, "prosody_3d"))

    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111, projection='3d')

    for provider in providers:
        provider_data = utterance_data[utterance_data["Provider"] == provider]
        if not provider_data.empty:
            ax.plot(
                provider_data["F0 (Hz)"],
                provider_data["Speech Rate (syl/s)"],
                provider_data["Energy (0-1)"],
                label=provider,
                color=_PROSODY_COLORS[provider],
                linewidth=2,
                marker='o',
                markersize=5
            )

    ax.set_xlabel("F0 (Hz)", fontsize=12)
    ax.set_ylabel("Speech Rate (syl/s)", fontsize=12)
    ax.set_zlabel("Energy (0-1)", fontsize=12)
    ax.set_title(f"3D Prosody Comparison - {example_utterance}", fontsize=16)

    ax.legend(fontsize=12)

    plt.savefig(output_file, dpi=300, bbox_inches="tight")
    print(f"3D prosody plot saved to {output_file}")

    # Close the figure to free memory
    plt.close()


def _plot_prosody_radar(data_file, output_dir):
    """Radar chart of min-max normalized prosody variation per provider."""
    prosody_summary = pd.read_excel(data_file, sheet_name="Prosody Summary")

    # Average the variation statistics by provider.
    prosody_avg = prosody_summary.groupby("Provider")[
        ["F0 Std (Hz)", "Speech Rate Std (syl/s)", "Energy Std (0-1)"]
    ].mean()

    # Normalize each column to the 0-1 range for the radar axes; an
    # all-equal column collapses to zeros instead of dividing by zero.
    normalized_data = {}
    for col in prosody_avg.columns:
        max_val = prosody_avg[col].max()
        min_val = prosody_avg[col].min()
        normalized_data[col] = (prosody_avg[col] - min_val) / (max_val - min_val) if max_val > min_val else prosody_avg[col] * 0

    prosody_radar_df = pd.DataFrame(normalized_data)

    output_file = os.path.join(output_dir, format_filename(None, "prosody_radar"))

    categories = ["Pitch Variation", "Speech Rate Variation", "Energy Variation"]
    N = len(categories)

    # Evenly spaced spokes; repeat the first angle to close the polygon.
    angles = [n / float(N) * 2 * np.pi for n in range(N)]
    angles += angles[:1]

    fig, ax = plt.subplots(figsize=(10, 10), subplot_kw=dict(polar=True))

    # Start at 12 o'clock and run clockwise.
    ax.set_theta_offset(np.pi / 2)
    ax.set_theta_direction(-1)

    plt.xticks(angles[:-1], categories, size=14)

    ax.set_rlabel_position(0)
    plt.yticks([0.2, 0.4, 0.6, 0.8, 1.0], ["0.2", "0.4", "0.6", "0.8", "1.0"], color="grey", size=12)
    plt.ylim(0, 1)

    for provider in providers:
        if provider in prosody_radar_df.index:
            values = [
                prosody_radar_df.loc[provider, "F0 Std (Hz)"],
                prosody_radar_df.loc[provider, "Speech Rate Std (syl/s)"],
                prosody_radar_df.loc[provider, "Energy Std (0-1)"]
            ]
            values += values[:1]  # close the polygon

            ax.plot(angles, values, linewidth=2, linestyle='solid', color=_PROSODY_COLORS[provider])
            ax.fill(angles, values, color=_PROSODY_COLORS[provider], alpha=0.25)

    legend_elements = [
        mpatches.Patch(facecolor=_PROSODY_COLORS[provider], alpha=0.25, edgecolor=_PROSODY_COLORS[provider], label=provider)
        for provider in providers if provider in prosody_radar_df.index
    ]
    ax.legend(handles=legend_elements, loc='upper right', fontsize=12)

    plt.title('Prosody Variation Comparison (Higher = More Expressive)', size=16, y=1.1)

    plt.savefig(output_file, dpi=300, bbox_inches="tight")
    print(f"Prosody radar chart saved to {output_file}")

    # Close the figure to free memory
    plt.close()


def create_prosody_plots(data_file, output_dir):
    """
    Create all prosody analysis plots: per-feature contour plots, a 3D
    feature trajectory, and a normalized variation radar chart.

    data_file: Excel workbook containing "Prosody Time Series" and
        "Prosody Summary" sheets.
    output_dir: directory the PNG files are written into.
    """
    prosody_df = pd.read_excel(data_file, sheet_name="Prosody Time Series")

    # Contour and 3D plots use the first utterance as a representative
    # example; the radar chart uses the full summary sheet instead.
    example_utterance = prosody_df["Utterance"].unique()[0]
    utterance_data = prosody_df[prosody_df["Utterance"] == example_utterance]

    _plot_prosody_contours(utterance_data, example_utterance, output_dir)
    _plot_prosody_3d(utterance_data, example_utterance, output_dir)
    _plot_prosody_radar(data_file, output_dir)


def generate_radar_chart(data_file, output_dir):
    """
    Generate radar charts and all other evaluation charts from a TTS
    evaluation data file, then print the score table.

    data_file: Path to the Excel file containing TTS evaluation data
    output_dir: Directory where output charts will be saved
    """
    if not os.path.exists(data_file):
        print(f"Error: Data file {data_file} not found")
        return

    # Create output directory if it doesn't exist
    os.makedirs(output_dir, exist_ok=True)

    # Calculate scores from the Excel file
    scores = calculate_scores_from_excel(data_file)

    # Combined radar chart with every provider, then one chart per provider.
    combined_output = os.path.join(output_dir, format_filename(None, "combined_radar"))
    create_radar_chart(scores, output_file=combined_output)

    for provider in providers:
        provider_output = os.path.join(output_dir, format_filename(provider, "radar_chart"))
        create_radar_chart(scores, output_file=provider_output, selected_provider=provider)

    # Generate Response Speed bar chart
    response_speed_output = os.path.join(output_dir, format_filename(None, "response_speed"))
    create_response_speed_bar_chart(scores, output_file=response_speed_output)

    # Extended visualizations are only generated when their source sheets
    # exist in the workbook.
    available_sheets = pd.ExcelFile(data_file).sheet_names

    if "WER Comparison Matrix" in available_sheets and "Error Type Frequencies" in available_sheets:
        wer_matrix_output = os.path.join(output_dir, format_filename(None, "wer_matrix"))
        create_wer_matrix_heatmap(data_file, output_file=wer_matrix_output)

        create_error_type_wordclouds(data_file, output_dir)

    if "DTW Distance Data" in available_sheets:
        dtw_boxplot_output = os.path.join(output_dir, format_filename(None, "dtw_boxplot"))
        create_dtw_boxplot(data_file, output_file=dtw_boxplot_output)

    if "Prosody Time Series" in available_sheets and "Prosody Summary" in available_sheets:
        create_prosody_plots(data_file, output_dir)

    # Print the calculated scores as a fixed-width table. The column loop
    # handles any number of providers (the previous version hardcoded
    # exactly three and raised IndexError when the list changed).
    print("\nCalculated scores (scale 1-10):")
    headers = ["Dimension"] + providers

    rows = [
        [category] + [scores[category][provider] for provider in providers]
        for category in scores
    ]

    def _format_row(cells):
        # First column is 20 chars wide, provider columns 15 each.
        return " ".join(
            f"{cell:<20}" if i == 0 else f"{cell:<15}"
            for i, cell in enumerate(cells)
        )

    separator = "-" * 60
    print(separator)
    print(_format_row(headers))
    print(separator)
    for row in rows:
        print(_format_row(row))
    print(separator)