"""
Quantum Readout Error Mitigation Module

This module implements Matrix Inversion (MI) technique for quantum readout error mitigation.
It handles data loading, processing, and analysis of quantum measurement results.

Required CSV columns:
    - qubits: List of qubit indices used in the experiment
    - true_value: True quantum state
    - exp_result: Measurement results dictionary
"""

import numpy as np
import pandas as pd
import json
from typing import Dict, List, Tuple, Union
import matplotlib.pyplot as plt
import seaborn as sns
import os
import io
import sys
import re

# *************** Data Loading Functions ***************

def load_experimental_data(filepath: str) -> pd.DataFrame:
    """
    Load experimental data from CSV file, selecting only the required columns.

    Args:
        filepath: Path to the CSV file containing quantum experiment results

    Returns:
        DataFrame containing only the required columns (qubits, true_value,
        exp_result), all loaded as strings.

    Raises:
        ValueError: If the CSV is missing one of the required columns.
        Exception: For any other loading failure (file missing, bad format).
    """
    # Only load the columns we need; dtype=str keeps bitstrings/JSON intact
    required_columns = ['qubits', 'true_value', 'exp_result']

    try:
        df = pd.read_csv(filepath, usecols=required_columns, dtype=str)
        return df
    except ValueError as e:
        # pandas raises ValueError when usecols are missing from the file.
        # Chain with `from e` so the original traceback is preserved.
        raise ValueError(f"CSV file must contain the following columns: {required_columns}. Error: {str(e)}") from e
    except Exception as e:
        raise Exception(f"Error loading data: {str(e)}") from e


def parse_experiment_results(df: pd.DataFrame) -> List[Dict]:
    """
    Parse experimental results from DataFrame into list of dictionaries.

    Args:
        df: DataFrame containing experimental results (must have 'exp_result'
            column of JSON strings)

    Returns:
        List of dictionaries containing parsed experimental data for each row

    Raises:
        json.JSONDecodeError: If any 'exp_result' entry is not valid JSON.
    """
    # Iterate the column directly: avoids the per-row Series construction
    # that DataFrame.iterrows() performs for every row.
    return [json.loads(raw) for raw in df['exp_result']]

# *************** Helper Functions ***************

def number_to_bitstring(num_qubits: int, number: int, reverse: bool = False) -> str:
    """
    Convert an integer into its binary bit-string representation.

    Args:
        num_qubits: Number of qubits in the system (minimum string width)
        number: Integer to convert
        reverse: If True, the bit order is flipped (MSB on the left side)

    Returns:
        Binary string, zero-padded on the left to num_qubits characters
    """
    bits = format(number, "b").rjust(num_qubits, "0")
    if reverse:
        return "".join(reversed(bits))
    return bits

def measurement_dict_to_vector(num_qubits: int, measurement_dict: Dict[str, float]) -> np.ndarray:
    """
    Convert a measurement dictionary to a probability vector.

    Args:
        num_qubits: Number of qubits
        measurement_dict: Dictionary mapping bitstrings to probabilities

    Returns:
        Probability vector of length 2**num_qubits.  The vector is
        renormalized if its entries do not already sum to 1.
    """
    vector = np.zeros(2**num_qubits)
    
    # Handle empty or None dictionary
    if not measurement_dict:
        return vector
    
    try:
        for bitstring, prob in measurement_dict.items():
            # Skip invalid bitstrings
            if not all(bit in '01' for bit in bitstring):
                print(f"Warning: Skipping invalid bitstring '{bitstring}'")
                continue
                
            # Handle bitstrings that are too short or too long
            if len(bitstring) != num_qubits:
                if len(bitstring) < num_qubits:
                    # Pad with zeros if too short
                    bitstring = bitstring.zfill(num_qubits)
                    print(f"Warning: Bitstring was too short, padded to '{bitstring}'")
                else:
                    # Truncate if too long
                    bitstring = bitstring[-num_qubits:]
                    print(f"Warning: Bitstring was too long, truncated to '{bitstring}'")
            
            # Convert bitstring to index
            idx = int(bitstring, 2)
            
            # BUG FIX: accumulate instead of assign.  Padding/truncation can
            # map several raw bitstrings onto the same index; plain
            # assignment silently discarded all but the last probability.
            vector[idx] += prob
    except Exception as e:
        print(f"Error converting measurement dictionary to vector: {str(e)}")
        print(f"Dictionary: {measurement_dict}")
    
    # Normalize vector if it doesn't sum to 1
    vector_sum = np.sum(vector)
    if not np.isclose(vector_sum, 1.0, atol=1e-5) and vector_sum > 0:
        vector = vector / vector_sum
        print(f"Warning: Measurement vector was normalized. Original sum: {vector_sum:.6f}")
    
    return vector

def measurement_dict_to_vector_majority(groups: List[List[int]], results_dict: Dict) -> np.ndarray:
    """
    Convert a measurement dictionary to a logical-qubit probability vector
    by majority voting within each physical-qubit group.

    Args:
        groups: List of qubit groups; each group encodes one logical qubit
        results_dict: Dictionary mapping measured bitstrings to probabilities

    Returns:
        Probability vector of length 2**len(groups).  Measurements with an
        ambiguous (tied) vote in any group are skipped; the vector is
        renormalized if its entries do not sum to 1.
    """
    n_logical = len(groups)
    prob_vector = np.zeros(2 ** n_logical)

    # Nothing to do for an empty/None dictionary.
    if not results_dict:
        return prob_vector

    try:
        for measured, prob in results_dict.items():
            # Reject strings containing anything other than 0/1.
            if any(ch not in '01' for ch in measured):
                print(f"Warning: Skipping invalid bitstring '{measured}'")
                continue

            logical_bits = []
            offset = 0
            ok = True

            for group in groups:
                width = len(group)
                # The measured string must cover every group.
                if offset + width > len(measured):
                    print(f"Warning: Measured string '{measured}' is too short for group {group}")
                    ok = False
                    break

                segment = measured[offset:offset + width]
                ones = segment.count('1')
                zeros = len(segment) - ones

                # A tied vote is ambiguous: drop the whole measurement.
                if ones == zeros:
                    ok = False
                    break

                logical_bits.append('1' if ones > zeros else '0')
                offset += width

            if ok:
                prob_vector[int("".join(logical_bits), 2)] += prob
    except Exception as e:
        print(f"Error converting measurement dictionary to vector with majority voting: {str(e)}")
        print(f"Dictionary: {results_dict}")

    # Renormalize when the accumulated probabilities drift from 1.
    total = np.sum(prob_vector)
    if not np.isclose(total, 1.0, atol=1e-5) and total > 0:
        prob_vector = prob_vector / total
        print(f"Warning: Measurement vector was normalized. Original sum: {total:.6f}")

    return prob_vector


def construct_response_matrix(groups: List[List[int]], df: pd.DataFrame) -> np.ndarray:
    """
    Construct the response matrix for error mitigation.

    For each qubit group, the calibration rows are assumed to be ordered by
    prepared basis state (first row -> |0...0>, second -> |0...1>, ...).
    Per-group matrices are combined with a Kronecker product.

    Args:
        groups: List of qubit groups
        df: DataFrame containing calibration data ('qubits' column holding
            the JSON-encoded group, 'exp_result' holding JSON counts)

    Returns:
        Response matrix R with R[measured, true] = P(measured | true)
    """
    # Initialize list to store response matrices for each group
    response_matrices = []
    
    # Process each group
    for group in groups:
        # Create response matrix for current group
        group_size = len(group)
        R = np.zeros((2**group_size, 2**group_size))
        
        # Get data for current group
        group_data = df.loc[(df['qubits'] == json.dumps(group))]
        
        # BUG FIX: label the prepared state by the row's position WITHIN this
        # group's rows.  The previous code used the DataFrame's global index,
        # which is wrong as soon as the CSV holds multiple groups or carries
        # a non-zero-based index.
        for true_idx, (_, row) in enumerate(group_data.iterrows()):
            results = json.loads(row['exp_result'])
            for measured, prob in results.items():
                R[int(measured, 2), true_idx] = prob
                
        response_matrices.append(R)
    
    # Combine all response matrices using Kronecker product
    final_R = response_matrices[0]
    for R in response_matrices[1:]:
        final_R = np.kron(final_R, R)
        
    return final_R

def construct_response_matrix_majority(groups: List[List[int]], df: pd.DataFrame, normalized: bool=True) -> np.ndarray:
    """
    Build the logical-qubit response matrix via majority voting.

    Each group of physical qubits encodes one logical qubit; both the
    prepared (true) state and every measured bitstring are collapsed to a
    logical bitstring by majority vote within each group.  Measured strings
    with a tied vote in any group are discarded as ambiguous.

    Args:
        groups: List of qubit groups, one group per logical qubit
        df: Calibration data with 'true_value' and 'exp_result' columns
        normalized: If True, each column is rescaled to sum to 1

    Returns:
        2^k x 2^k response matrix, where k is the number of logical qubits,
        with entries R[measured_logical, true_logical]
    """
    num_logical = len(groups)
    dim = 2 ** num_logical
    R = np.zeros((dim, dim))

    for row_idx, row in df.iterrows():
        # Determine the prepared physical state for this calibration row.
        raw_true = row['true_value']
        if pd.isna(raw_true):
            # Missing label: fall back to the row index as the state number.
            true_state = number_to_bitstring(num_logical, row_idx)
        else:
            # Strip '2' characters from the label — presumably placeholder
            # markers for unused positions; TODO confirm with data producer.
            true_state = "".join(c for c in raw_true if c != '2')

        # exp_result must be a parsable JSON string (single quotes tolerated).
        payload = row['exp_result']
        if not isinstance(payload, str):
            print(f"Warning: exp_result is not a string: {type(payload)}")
            continue
        try:
            results = json.loads(payload.replace("'", '"'))
        except json.JSONDecodeError:
            print(f"Warning: Could not parse exp_result as JSON: {payload}")
            continue

        # Collapse the true physical state to logical bits (ties give '0').
        logical_true = ""
        cursor = 0
        for group in groups:
            width = len(group)
            segment = true_state[cursor:cursor + width]
            ones = segment.count('1')
            logical_true += '1' if ones > len(segment) - ones else '0'
            cursor += width

        # Collapse each measured bitstring the same way and accumulate.
        for measured, prob in results.items():
            logical_measured = ""
            cursor = 0
            ambiguous = False

            for group in groups:
                width = len(group)
                segment = measured[cursor:cursor + width]
                ones = segment.count('1')
                zeros = len(segment) - ones

                # A tied vote is ambiguous: skip this measurement entirely.
                if ones == zeros:
                    ambiguous = True
                    break

                logical_measured += '1' if ones > zeros else '0'
                cursor += width

            if not ambiguous:
                R[int(logical_measured, 2), int(logical_true, 2)] += prob

    # Optionally rescale each column into a conditional distribution.
    if normalized:
        column_totals = R.sum(axis=0)
        # Avoid division by zero for all-zero columns.
        column_totals = np.where(column_totals > 0, column_totals, 1)
        R = R / column_totals[np.newaxis, :]

    return R


# *************** Matrix Inversion method for Error Mitigation ***************
def matrix_inversion_mitigation(measured_vector: np.ndarray, response_matrix: np.ndarray) -> np.ndarray:
    """
    Perform matrix inversion for error mitigation.

    Solves R @ t = y for the true distribution t via np.linalg.solve (LU
    factorization) instead of forming an explicit inverse — numerically
    more stable and cheaper, with identical results for well-conditioned R
    and the same LinAlgError on singular input.

    Args:
        measured_vector: Vector of measured probabilities
        response_matrix: Response matrix R

    Returns:
        Mitigated probability vector (flattened 1-D array)
    """
    return np.ravel(np.linalg.solve(response_matrix, measured_vector))

def IBU(ymes: np.ndarray, t0: np.ndarray, Rin: np.ndarray, n: int) -> np.ndarray:
    """
    Perform Iterative Bayesian Unfolding (d'Agostini iteration) for
    readout error mitigation.

    Each iteration applies
        t[j] <- t[j] * sum_i R[i, j] * y[i] / (R @ t)[i]
    where terms with a non-positive predicted probability (R @ t)[i] <= 0
    are dropped, matching the guard in the original scalar loop.

    Args:
        ymes: Measured probability distribution
        t0: Initial guess for true distribution
        Rin: Response matrix (measured vs true)
        n: Number of iterations

    Returns:
        Mitigated probability distribution
    """
    R = np.asarray(Rin, dtype=float)
    y = np.asarray(ymes, dtype=float)
    tn = np.asarray(t0, dtype=float)

    for _ in range(n):
        # Predicted measured distribution under the current estimate.
        predicted = R @ tn
        # Guard against division by zero: bins with a non-positive
        # prediction contribute nothing to the update.
        safe = predicted > 0
        ratio = np.zeros_like(y)
        ratio[safe] = y[safe] / predicted[safe]
        # Vectorized Bayes update; replaces the O(len(y) * len(t)^2)
        # Python loop that recomputed the denominator for every (i, j).
        tn = tn * (R.T @ ratio)

    return tn

# *************** Analysis Functions ***************
def TVD(p: np.ndarray, q: np.ndarray) -> float:
    """
    Total Variation Distance between two probability distributions.

    Inputs that do not sum to one are renormalized (a zero-sum input is
    replaced by the uniform distribution), and negative entries are clipped
    to zero with renormalization before the distance is computed.

    Args:
        p: First probability distribution
        q: Second probability distribution

    Returns:
        0.5 * sum(|p - q|)
    """
    p_sum = np.sum(p)
    q_sum = np.sum(q)

    # Renormalize unless both inputs already sum (approximately) to one.
    if not (np.isclose(p_sum, 1.0, atol=1e-5) and np.isclose(q_sum, 1.0, atol=1e-5)):
        # A zero-sum input falls back to the uniform distribution.
        p = p / p_sum if p_sum > 0 else np.ones_like(p) / len(p)
        q = q / q_sum if q_sum > 0 else np.ones_like(q) / len(q)
        print(f"Warning: Input distributions were normalized. Original sums: p={p_sum:.6f}, q={q_sum:.6f}")

    # Probabilities should never be negative; clip and renormalize if so.
    if np.any(p < 0) or np.any(q < 0):
        p = np.clip(p, 0, 1)
        q = np.clip(q, 0, 1)
        p = p / np.sum(p)
        q = q / np.sum(q)
        print("Warning: Negative probabilities were clipped to 0 and distributions were renormalized")

    # Half the L1 distance between the two distributions.
    return 0.5 * np.sum(np.abs(p - q))

def Accuracy(P: Union[np.ndarray, Dict[str, float]], Q: Union[np.ndarray, Dict[str, float]]) -> float:
    """
    Calculate Accuracy between two probability distributions.

    Defined here as 1 - TVD(P, Q) / 2, so identical distributions score 1.
    """
    half_distance = TVD(P, Q) / 2
    return 1 - half_distance

# *************** Main Execution ***************

# one-qubit test with qem method with different shot counts
def task1(shots: int = 10000):
    """
    One-qubit mitigation benchmark for a given shot count.

    Loads the one-qubit test measurement and matching calibration CSVs,
    builds the response matrix, mitigates the measured distribution with
    Matrix Inversion (MI) and Iterative Bayesian Unfolding (IBU), and
    prints the Total Variation Distance of the raw/MI/IBU results against
    a 50/50 target distribution.

    Args:
        shots: Shot count; selects which CSV files under
            data/data_20250223/ are loaded (paths are hard-coded).
    """
    # Configuration
    NUM_QUBITS = 1
    VECTOR_SIZE = 2**NUM_QUBITS
    QUBIT_GROUPS = [[56]]  # NOTE(review): qubit index 56 — confirm device layout
    IBU_ITERATIONS = 1
    
    # Load and process data
    test_df = load_experimental_data(f"data/data_20250223/one-qubit_h_test_shots/one-qubit_test_shots_{shots}shots.csv")
    test_dict = parse_experiment_results(test_df)[0]
    one_qubit_benchmark_df = load_experimental_data(f"data/data_20250223/one-qubit_benchmark/one-qubit_benchmark_{shots}shots.csv")
    ymes = measurement_dict_to_vector(NUM_QUBITS, test_dict)
    
    # Construct response matrix
    response_matrix = construct_response_matrix(QUBIT_GROUPS, one_qubit_benchmark_df)
    
    # Perform error mitigation
    # *************** Matrix Inversion method for Error Mitigation ***************
    mitigated_vector_via_MI = matrix_inversion_mitigation(ymes, response_matrix)
    
    # *************** Iterative Bayesian Unfolding method for Error Mitigation ***************
    mitigated_vector_via_IBU = IBU(ymes, np.ones(VECTOR_SIZE) / VECTOR_SIZE, response_matrix, IBU_ITERATIONS)
    
    # Calculate TVD
    true_probs = {'0':0.5, '1':0.5}  # target distribution used as ground truth
    TVD_via_nothing = TVD(ymes, measurement_dict_to_vector(NUM_QUBITS, true_probs))
    TVD_via_MI = TVD(mitigated_vector_via_MI, measurement_dict_to_vector(NUM_QUBITS, true_probs))
    TVD_via_IBU = TVD(mitigated_vector_via_IBU, measurement_dict_to_vector(NUM_QUBITS, true_probs))
    print(f'task 1 TVD via nothing, shots: {shots}, Total Variation Distance: {TVD_via_nothing}')
    print(f"task 1 TVD via MI, shots: {shots}, Total Variation Distance: {TVD_via_MI}")
    print(f"task 1 TVD via IBU, shots: {shots}, Total Variation Distance: {TVD_via_IBU}")
    

# 2-qubit qec test with qem method and 2-qubit mh code with different shot counts
def task2(shots: int = 10000):
    """
    Two-qubit QEC benchmark with majority-vote decoding for a given shot count.

    Two physical qubits encode one logical qubit; measurement dictionaries
    are collapsed by majority voting (ties discarded) before MI and IBU
    mitigation.  Prints the TVD of raw/MI/IBU results against a 50/50
    target distribution.

    Args:
        shots: Shot count; selects which CSV files under
            data/data_20250223/ are loaded (paths are hard-coded).
    """
    # Configuration
    LOGICAL_NUM_QUBITS = 1
    VECTOR_SIZE = 2
    QUBIT_GROUPS = [[56, 57]]  # two physical qubits -> one logical qubit
    IBU_ITERATIONS = 1
    
    # Load and process data
    test_df = load_experimental_data(f"data/data_20250223/two-qubit_h_test_shots/two-qubit_test_shots_{shots}shots.csv")
    test_dict = parse_experiment_results(test_df)[0]
    # NOTE(review): directory is 'one-qubit_benchmark_qec' but the file is
    # two-qubit — confirm this path matches the actual data layout.
    two_qubit_qec_benchmark_df = load_experimental_data(f"data/data_20250223/one-qubit_benchmark_qec/two-qubit_benchmark_qec_{shots}shots.csv")
    ymes =  measurement_dict_to_vector_majority(QUBIT_GROUPS, test_dict)
    
    print(two_qubit_qec_benchmark_df)  # debug output of the calibration data
    # Construct response matrix
    response_matrix = construct_response_matrix_majority(QUBIT_GROUPS, two_qubit_qec_benchmark_df)
    
    # Perform error mitigation
    # *************** Matrix Inversion method for Error Mitigation ***************
    mitigated_vector_via_MI = matrix_inversion_mitigation(ymes, response_matrix)
    
    # *************** Iterative Bayesian Unfolding method for Error Mitigation ***************
    mitigated_vector_via_IBU = IBU(ymes, np.ones(VECTOR_SIZE) / VECTOR_SIZE, response_matrix, IBU_ITERATIONS)
    
    # Calculate TVD 
    true_probs = {'0':0.5, '1':0.5}  # target distribution used as ground truth
    TVD_via_nothing = TVD(ymes, measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs))
    TVD_via_MI = TVD(mitigated_vector_via_MI, measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs))
    TVD_via_IBU = TVD(mitigated_vector_via_IBU, measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs))
    
    # Clean print statements
    print(f'task 2 TVD via nothing, shots: {shots}, Total Variation Distance: {TVD_via_nothing:.4f}')
    print(f"task 2 TVD via MI, shots: {shots}, Total Variation Distance: {TVD_via_MI:.4f}")
    print(f"task 2 TVD via IBU, shots: {shots}, Total Variation Distance: {TVD_via_IBU:.4f}")

# 3-qubit qec test with qem method and repetition code with different shot counts
def task3(shots: int = 10000):
    """
    Three-qubit repetition-code benchmark with majority-vote decoding.

    Three physical qubits encode one logical qubit via a repetition code;
    measurement dictionaries are collapsed by majority voting before MI and
    IBU mitigation.  Prints the TVD of raw/MI/IBU results against a 50/50
    target distribution.

    Args:
        shots: Shot count; selects which CSV files under
            data/data_20250223/ are loaded (paths are hard-coded).
    """
    # Configuration
    LOGICAL_NUM_QUBITS = 1
    VECTOR_SIZE = 2
    QUBIT_GROUPS = [[56, 55, 57]]  # three physical qubits -> one logical qubit
    IBU_ITERATIONS = 1

    # Load and process data
    test_df = load_experimental_data(f"data/data_20250223/three-qubit_repetition_code_hadamard/three-qubit_repetition_code_hadamard_{shots}shots.csv")
    test_dict = parse_experiment_results(test_df)[0]
    three_qubit_qec_benchmark_df = load_experimental_data(f"data/data_20250223/one-qubit_benchmark_qec/three-qubit_benchmark_qec_{shots}shots.csv")
    # three_qubit_qec_benchmark_df = load_experimental_data(f"data/data_20250223/one-qubit_benchmark/one-qubit_benchmark_{shots}shots.csv")
    ymes = measurement_dict_to_vector_majority(QUBIT_GROUPS, test_dict)

    # Construct response matrix
    response_matrix = construct_response_matrix_majority(QUBIT_GROUPS, three_qubit_qec_benchmark_df)

    # Perform error mitigation
    # *************** Matrix Inversion method for Error Mitigation ***************
    mitigated_vector_via_MI = matrix_inversion_mitigation(ymes, response_matrix)

    # *************** Iterative Bayesian Unfolding method for Error Mitigation ***************
    mitigated_vector_via_IBU = IBU(ymes, np.ones(VECTOR_SIZE) / VECTOR_SIZE, response_matrix, IBU_ITERATIONS)

    # Calculate TVD
    true_probs = {'0':0.5, '1':0.5}  # target distribution used as ground truth
    TVD_via_nothing = TVD(ymes, measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs))
    TVD_via_MI = TVD(mitigated_vector_via_MI, measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs))
    TVD_via_IBU = TVD(mitigated_vector_via_IBU, measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs))
    print(f'task 3 TVD via nothing, shots: {shots}, Total Variation Distance: {TVD_via_nothing}')
    print(f"task 3 TVD via MI, shots: {shots}, Total Variation Distance: {TVD_via_MI}")
    print(f"task 3 TVD via IBU, shots: {shots}, Total Variation Distance: {TVD_via_IBU}")

def plot_statistical_distance_comparison(results_df, title="Total Variation Distance Comparison Across Methods", save_path=None, task_name=None, unnormalized_df=None):
    """
    Plot the statistical distance comparison across different methods.
    
    Args:
        results_df: DataFrame containing the normalized results
        title: Plot title
        save_path: Path to save the plot
        task_name: Name of the task (used to determine legend labels)
        unnormalized_df: Optional DataFrame containing unnormalized results for comparison
    """
    import matplotlib.pyplot as plt
    import matplotlib.font_manager as fm
    import os
    
    # Set global font to Times New Roman
    plt.rcParams['font.family'] = 'Times New Roman'
    plt.rcParams['font.size'] = 14
    plt.rcParams['axes.labelsize'] = 16
    plt.rcParams['axes.titlesize'] = 18
    plt.rcParams['xtick.labelsize'] = 12
    plt.rcParams['ytick.labelsize'] = 12
    
    # Create figure and axis with higher resolution for publication
    fig, ax = plt.subplots(figsize=(12, 8), dpi=300)
    
    # Extract data
    shots_values = results_df['Shots'].unique()
    
    # Define colors and markers for specific methods - ensure consistency across all plots
    method_colors = {
        'Raw': '#000000',  # Black for Raw
        'QED': '#1f77b4',  # Blue for QED
        'MI': '#ff7f0e',   # Orange for MI
        'IBU': '#2ca02c',  # Green for IBU
    }
    
    markers = ['o', 's', '^', 'D', 'x']
    
    # For 2-qubit and 3-qubit systems, we want to show 5 specific labels
    if task_name in ['Two Qubit', 'Three Qubit'] and unnormalized_df is not None:
        # 1. Raw (black line)
        if 'TotalVariationDistance_Raw' in results_df.columns:
            grouped = results_df.groupby('Shots')['TotalVariationDistance_Raw']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['Raw'], 
                       marker=markers[0], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='Raw',
                       linestyle='-')
        
        # 2. MI+QED normalized (line)
        if 'TotalVariationDistance_MI' in results_df.columns:
            grouped = results_df.groupby('Shots')['TotalVariationDistance_MI']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['MI'], 
                       marker=markers[1], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='QED+MI (Normalized)',
                       linestyle='-')
        
        # 3. IBU+QED normalized (line)
        if 'TotalVariationDistance_IBU' in results_df.columns:
            grouped = results_df.groupby('Shots')['TotalVariationDistance_IBU']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['IBU'], 
                       marker=markers[2], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='QED+IBU (Normalized)',
                       linestyle='-')
        
        # 4. MI+QED unnormalized (dash line)
        if 'TotalVariationDistance_MI' in unnormalized_df.columns:
            grouped = unnormalized_df.groupby('Shots')['TotalVariationDistance_MI']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['MI'], 
                       marker=markers[3], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='QED+MI (Unnormalized)',
                       linestyle='--')
        
        # 5. IBU+QED unnormalized (dash line)
        if 'TotalVariationDistance_IBU' in unnormalized_df.columns:
            grouped = unnormalized_df.groupby('Shots')['TotalVariationDistance_IBU']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['IBU'], 
                       marker=markers[4], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='QED+IBU (Unnormalized)',
                       linestyle='--')
    
    # For Single Qubit, use the same colors as 2-qubit and 3-qubit
    elif task_name == 'Single Qubit':
        # 1. Raw (black line)
        if 'TotalVariationDistance_Raw' in results_df.columns:
            grouped = results_df.groupby('Shots')['TotalVariationDistance_Raw']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['Raw'], 
                       marker=markers[0], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='Raw',
                       linestyle='-')
        
        # 2. MI (line) - use same color as MI+QED in 2-qubit and 3-qubit (orange)
        if 'TotalVariationDistance_MI' in results_df.columns:
            grouped = results_df.groupby('Shots')['TotalVariationDistance_MI']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['MI'],  # Ensure this is orange (#ff7f0e)
                       marker=markers[1], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='MI',
                       linestyle='-')
        
        # 3. IBU (line) - use same color as IBU+QED in 2-qubit and 3-qubit (green)
        if 'TotalVariationDistance_IBU' in results_df.columns:
            grouped = results_df.groupby('Shots')['TotalVariationDistance_IBU']
            means = grouped.mean()
            stds = grouped.std()
            
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=method_colors['IBU'],  # Ensure this is green (#2ca02c)
                       marker=markers[2], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label='IBU',
                       linestyle='-')
    
    # For other cases, use the original plotting logic
    else:
        # Determine which columns to plot based on what's available
        columns_to_plot = []
        if 'TotalVariationDistance_Raw' in results_df.columns:
            columns_to_plot.append('TotalVariationDistance_Raw')
        if 'TotalVariationDistance_QED' in results_df.columns:
            columns_to_plot.append('TotalVariationDistance_QED')
        if 'TotalVariationDistance_MI' in results_df.columns:
            columns_to_plot.append('TotalVariationDistance_MI')
        if 'TotalVariationDistance_QED_MI' in results_df.columns:
            columns_to_plot.append('TotalVariationDistance_QED_MI')
        if 'TotalVariationDistance_IBU' in results_df.columns:
            columns_to_plot.append('TotalVariationDistance_IBU')
        if 'TotalVariationDistance_QED_IBU' in results_df.columns:
            columns_to_plot.append('TotalVariationDistance_QED_IBU')
        
        # Map column names to method colors for consistency
        column_to_color = {
            'TotalVariationDistance_Raw': method_colors['Raw'],
            'TotalVariationDistance_QED': method_colors['QED'],
            'TotalVariationDistance_MI': method_colors['MI'],
            'TotalVariationDistance_IBU': method_colors['IBU'],
            'TotalVariationDistance_QED_MI': method_colors['MI'],
            'TotalVariationDistance_QED_IBU': method_colors['IBU']
        }
        
        # Plot each method for normalized data (solid lines)
        for i, column in enumerate(columns_to_plot):
            # Group by shots and calculate mean and std
            grouped = results_df.groupby('Shots')[column]
            means = grouped.mean()
            stds = grouped.std()
            
            # Get label based on task name
            if task_name:
                if task_name == 'Single Qubit':
                    # For Single Qubit, don't include QED in labels
                    if column == 'TotalVariationDistance_Raw':
                        label = 'Raw'
                    elif column == 'TotalVariationDistance_MI':
                        label = 'MI'
                    elif column == 'TotalVariationDistance_IBU':
                        label = 'IBU'
                    else:
                        label = column.replace('TotalVariationDistance_', '')
                else:
                    # For Two Qubit and Three Qubit, include QED in labels
                    if column == 'TotalVariationDistance_Raw':
                        label = 'Raw'
                    elif column == 'TotalVariationDistance_QED':
                        label = 'QED'
                    elif column == 'TotalVariationDistance_MI':
                        label = 'QED+MI'
                    elif column == 'TotalVariationDistance_IBU':
                        label = 'QED+IBU'
                    else:
                        label = column.replace('TotalVariationDistance_', '')
            else:
                label = column.replace('TotalVariationDistance_', '')
            
            # Add "Normalized" to label if unnormalized_df is provided
            if unnormalized_df is not None:
                label += " (Normalized)"
            
            # Choose color based on method for consistency
            color = column_to_color.get(column, method_colors['Raw'])
            
            # Plot with error bars
            ax.errorbar(shots_values, means, yerr=stds, 
                       color=color, 
                       marker=markers[i % len(markers)], 
                       markersize=10,
                       linewidth=2.5,
                       elinewidth=1.5,
                       capsize=5,
                       label=label,
                       linestyle='-')  # Solid line for normalized
        
        # Plot unnormalized data if provided (dashed lines)
        if unnormalized_df is not None and not unnormalized_df.empty:
            for i, column in enumerate(columns_to_plot):
                if column in unnormalized_df.columns:
                    # Group by shots and calculate mean and std
                    grouped = unnormalized_df.groupby('Shots')[column]
                    means = grouped.mean()
                    stds = grouped.std()
                    
                    # Get label based on task name
                    if task_name:
                        if task_name == 'Single Qubit':
                            # For Single Qubit, don't include QED in labels
                            if column == 'TotalVariationDistance_Raw':
                                label = 'Raw'
                            elif column == 'TotalVariationDistance_MI':
                                label = 'MI'
                            elif column == 'TotalVariationDistance_IBU':
                                label = 'IBU'
                            else:
                                label = column.replace('TotalVariationDistance_', '')
                        else:
                            # For Two Qubit and Three Qubit, include QED in labels
                            if column == 'TotalVariationDistance_Raw':
                                label = 'Raw'
                            elif column == 'TotalVariationDistance_QED':
                                label = 'QED'
                            elif column == 'TotalVariationDistance_MI':
                                label = 'QED+MI'
                            elif column == 'TotalVariationDistance_IBU':
                                label = 'QED+IBU'
                            else:
                                label = column.replace('TotalVariationDistance_', '')
                    else:
                        label = column.replace('TotalVariationDistance_', '')
                    
                    # Add "Unnormalized" to label
                    label += " (Unnormalized)"
                    
                    # Choose color based on method for consistency
                    color = column_to_color.get(column, method_colors['Raw'])
                    
                    # Plot with error bars
                    ax.errorbar(shots_values, means, yerr=stds, 
                               color=color, 
                               marker=markers[i % len(markers)], 
                               markersize=10,
                               linewidth=2.5,
                               elinewidth=1.5,
                               capsize=5,
                               label=label,
                               linestyle='--')  # Dashed line for unnormalized
    
    # Set log scale for x-axis
    ax.set_xscale('log')
    
    # Set labels and title with consistent font sizes
    ax.set_xlabel('Number of Shots', fontsize=16, fontweight='bold')
    ax.set_ylabel('Total Variation Distance', fontsize=16, fontweight='bold')
    ax.set_title(title, fontsize=18, fontweight='bold')
    
    # Set tick parameters
    ax.tick_params(axis='both', which='major', labelsize=12)
    
    # Add grid
    ax.grid(True, linestyle='--', alpha=0.7)
    
    # Ensure y-axis starts from 0
    ax.set_ylim(bottom=0)
    
    # Add legend with two columns for better readability
    ax.legend(fontsize=12, loc='best', frameon=True, fancybox=True, 
             shadow=True, ncol=2)
    
    # Add a text box with publication information
    plt.figtext(0.01, 0.01, 
               "Quantum Error Mitigation: Normalized vs Unnormalized Response Matrices", 
               fontsize=8, style='italic')
    
    # Tight layout
    plt.tight_layout()
    
    # Save the figure if a path is provided
    if save_path:
        try:
            # Create directory if it doesn't exist
            save_dir = os.path.dirname(save_path)
            if save_dir and not os.path.exists(save_dir):
                os.makedirs(save_dir, exist_ok=True)
                
            # Save in multiple formats for different purposes
            # PDF for vector graphics (publication quality)
            pdf_path = os.path.splitext(save_path)[0] + '.pdf'
            plt.savefig(pdf_path, dpi=300, bbox_inches='tight', format='pdf')
            print(f"Vector plot saved to {pdf_path}")
            
            # PNG for high-resolution raster graphics (web/presentations)
            png_path = os.path.splitext(save_path)[0] + '.png'
            plt.savefig(png_path, dpi=600, bbox_inches='tight', format='png')
            print(f"High-resolution plot saved to {png_path}")
            
            # JPEG for compatibility (without quality parameter)
            jpg_path = os.path.splitext(save_path)[0] + '.jpg'
            plt.savefig(jpg_path, dpi=600, bbox_inches='tight', format='jpg')
            print(f"JPEG plot saved to {jpg_path}")
            
        except PermissionError:
            # If permission error, try saving to a fallback location
            fallback_dir = os.path.join(os.path.expanduser("~"), "Downloads")
            fallback_base = os.path.join(fallback_dir, os.path.basename(os.path.splitext(save_path)[0]))
            
            try:
                # Save in multiple formats in fallback location
                plt.savefig(fallback_base + '.pdf', dpi=300, bbox_inches='tight', format='pdf')
                plt.savefig(fallback_base + '.png', dpi=600, bbox_inches='tight', format='png')
                plt.savefig(fallback_base + '.jpg', dpi=600, bbox_inches='tight', format='jpg')
                print(f"Permission denied for {save_path}. Plots saved to {fallback_base}.* instead.")
            except Exception as e:
                print(f"Warning: Could not save plot to {save_path} or fallback location: {str(e)}")
        except Exception as e:
            print(f"Warning: Could not save plot to {save_path}: {str(e)}")
    
    return fig, ax

def collect_results(task_func, shots_list, task_name):
    """
    Run a task function at several shot counts and tabulate the TVD values
    it prints.

    Each task prints lines of the form
    ``... TVD via <method> ... Total Variation Distance: <value>``; this
    function captures that stdout output, extracts the values with regular
    expressions, and collects them into a DataFrame.

    Args:
        task_func: Callable taking a single ``shots`` argument; expected to
            print TVD result lines to stdout.
        shots_list: Iterable of shot counts to run the task with.
        task_name: Name of the task, used for logging and to choose the
            baseline column name (multi-qubit tasks label the un-mitigated
            baseline "QED"; the single-qubit task labels it "Raw").

    Returns:
        DataFrame with a 'Shots' column plus one TVD column per method.
        If nothing could be parsed, an empty DataFrame with the expected
        columns is returned.
    """
    multi_qubit = task_name in (
        'Two Qubit', 'Three Qubit',
        'Two Qubit Unnormalized', 'Three Qubit Unnormalized',
    )
    baseline_column = ('TotalVariationDistance_QED' if multi_qubit
                       else 'TotalVariationDistance_Raw')

    # (compiled pattern, result-column) pairs; compiled once, reused per run.
    patterns = [
        (re.compile(r'TVD via nothing.*Total Variation Distance: ([\d\.]+)'),
         baseline_column),
        (re.compile(r'TVD via MI.*Total Variation Distance: ([\d\.]+)'),
         'TotalVariationDistance_MI'),
        (re.compile(r'TVD via IBU.*Total Variation Distance: ([\d\.]+)'),
         'TotalVariationDistance_IBU'),
    ]

    results = []

    for shots in shots_list:
        captured_output = io.StringIO()
        old_stdout = sys.stdout
        sys.stdout = captured_output
        error = None
        try:
            task_func(shots)
        except Exception as e:
            error = e
        finally:
            # Always restore stdout, even if the task raises, so later
            # prints (including our own error reports) reach the console.
            sys.stdout = old_stdout

        if error is not None:
            print(f"Error parsing output for {task_name} with {shots} shots: {str(error)}")
            print(f"Raw output was: {captured_output.getvalue()[:200]}")  # Print first 200 chars
            continue

        output = captured_output.getvalue()

        # Parse Total Variation Distances from the captured output
        result = {'Shots': shots}
        for pattern, column in patterns:
            match = pattern.search(output)
            if match:
                result[column] = float(match.group(1))

        if len(result) > 1:  # More than just 'Shots' => at least one TVD parsed
            results.append(result)
        else:
            print(f"Error parsing output for {task_name} with {shots} shots: No valid TVD values found")
            print(f"Raw output was: {output[:200]}...")  # Print first 200 chars of output

    if results:
        # Remove rows where any method failed to produce a value
        return pd.DataFrame(results).dropna()

    # Return an empty DataFrame with the expected columns
    columns = ['Shots'] + [column for _, column in patterns]
    return pd.DataFrame(columns=columns)

def generate_latex_table(df, caption="Total Variation Distance comparison across different methods"):
    """
    Generate LaTeX table from results
    """
    latex_table = df.to_latex(float_format="%.4f", index=False)
    latex_table = f"\\begin{{table}}[h]\n\\centering\n{latex_table}\n\\caption{{{caption}}}\n\\end{{table}}"
    return latex_table

def task2_unnormalized(shots: int = 10000):
    """
    Two-qubit QEC test with unnormalized response matrix.

    Loads the two-qubit Hadamard test and QEC benchmark data for the given
    shot count, applies majority voting, then compares raw, Matrix Inversion
    (MI) and Iterative Bayesian Unfolding (IBU) mitigation against the ideal
    50/50 distribution, printing the Total Variation Distance for each.

    Args:
        shots: Number of measurement shots; selects the matching data files.
    """
    # Configuration
    LOGICAL_NUM_QUBITS = 1        # one logical qubit encoded in the group
    VECTOR_SIZE = 2               # probability-vector length for 1 logical qubit
    QUBIT_GROUPS = [[56, 57]]     # physical qubits forming the logical qubit
    IBU_ITERATIONS = 1

    # Load and process data
    test_df = load_experimental_data(f"data/data_20250223/two-qubit_h_test_shots/two-qubit_test_shots_{shots}shots.csv")
    test_dict = parse_experiment_results(test_df)[0]
    two_qubit_qec_benchmark_df = load_experimental_data(f"data/data_20250223/one-qubit_benchmark_qec/two-qubit_benchmark_qec_{shots}shots.csv")

    # Use majority voting to get the measurement vector
    ymes = measurement_dict_to_vector_majority(QUBIT_GROUPS, test_dict)

    # Ensure ymes is the right size (2 elements for 1 logical qubit)
    if len(ymes) > VECTOR_SIZE:
        ymes = np.asarray(ymes[:VECTOR_SIZE])

    # Construct unnormalized response matrix using majority voting
    response_matrix_unnorm = construct_response_matrix_majority(QUBIT_GROUPS, two_qubit_qec_benchmark_df, normalized=False)

    # Matrix Inversion method, with a regularized fallback for singular matrices
    try:
        mitigated_vector_via_MI = matrix_inversion_mitigation(ymes, response_matrix_unnorm)
    except np.linalg.LinAlgError:
        print("Warning: Matrix inversion failed, using regularized inversion")
        # Add a small value to the diagonal for regularization
        reg_matrix = response_matrix_unnorm + np.eye(response_matrix_unnorm.shape[0]) * 1e-6
        mitigated_vector_via_MI = np.ravel(np.matmul(np.linalg.inv(reg_matrix), ymes))
    # Project back onto a valid probability distribution; guard against a
    # zero-sum vector (everything clipped to 0) which would produce NaNs.
    mitigated_vector_via_MI = np.clip(mitigated_vector_via_MI, 0, 1)
    mi_total = np.sum(mitigated_vector_via_MI)
    if mi_total > 0:
        mitigated_vector_via_MI = mitigated_vector_via_MI / mi_total

    # IBU method, starting from a uniform prior
    mitigated_vector_via_IBU = IBU(ymes, np.ones(VECTOR_SIZE) / VECTOR_SIZE, response_matrix_unnorm, IBU_ITERATIONS)
    mitigated_vector_via_IBU = np.clip(mitigated_vector_via_IBU, 0, 1)
    ibu_total = np.sum(mitigated_vector_via_IBU)
    if ibu_total > 0:
        mitigated_vector_via_IBU = mitigated_vector_via_IBU / ibu_total

    # Compare against the ideal 50/50 distribution (computed once, reused)
    true_probs = {'0':0.5, '1':0.5}
    true_vector = measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs)

    # Normalize the raw vector for a fair TVD comparison
    ymes_norm = ymes / np.sum(ymes) if np.sum(ymes) > 0 else ymes
    TVD_via_nothing = TVD(ymes_norm, true_vector)
    TVD_via_MI = TVD(mitigated_vector_via_MI, true_vector)
    TVD_via_IBU = TVD(mitigated_vector_via_IBU, true_vector)

    print(f'task 2 Unnormalized TVD via nothing, shots: {shots}, Total Variation Distance: {TVD_via_nothing:.4f}')
    print(f"task 2 Unnormalized TVD via MI, shots: {shots}, Total Variation Distance: {TVD_via_MI:.4f}")
    print(f"task 2 Unnormalized TVD via IBU, shots: {shots}, Total Variation Distance: {TVD_via_IBU:.4f}")

def task3_unnormalized(shots: int = 10000):
    """
    Three-qubit repetition code test with unnormalized response matrix.

    Loads the three-qubit repetition-code Hadamard data and QEC benchmark
    data for the given shot count, applies majority voting, then compares
    raw, Matrix Inversion (MI) and Iterative Bayesian Unfolding (IBU)
    mitigation against the ideal 50/50 distribution, printing the Total
    Variation Distance for each.

    Args:
        shots: Number of measurement shots; selects the matching data files.
    """
    # Configuration
    LOGICAL_NUM_QUBITS = 1            # one logical qubit encoded in the group
    VECTOR_SIZE = 2                   # probability-vector length for 1 logical qubit
    QUBIT_GROUPS = [[56, 55, 57]]     # physical qubits forming the logical qubit
    IBU_ITERATIONS = 1

    # Load and process data
    test_df = load_experimental_data(f"data/data_20250223/three-qubit_repetition_code_hadamard/three-qubit_repetition_code_hadamard_{shots}shots.csv")
    test_dict = parse_experiment_results(test_df)[0]
    three_qubit_qec_benchmark_df = load_experimental_data(f"data/data_20250223/one-qubit_benchmark_qec/three-qubit_benchmark_qec_{shots}shots.csv")

    # Use majority voting to get the measurement vector
    ymes = measurement_dict_to_vector_majority(QUBIT_GROUPS, test_dict)

    # Ensure ymes is the right size (2 elements for 1 logical qubit)
    if len(ymes) > VECTOR_SIZE:
        ymes = np.asarray(ymes[:VECTOR_SIZE])

    # Construct unnormalized response matrix using majority voting
    response_matrix_unnorm = construct_response_matrix_majority(QUBIT_GROUPS, three_qubit_qec_benchmark_df, normalized=False)

    # Matrix Inversion method, with a regularized fallback for singular matrices
    try:
        mitigated_vector_via_MI = matrix_inversion_mitigation(ymes, response_matrix_unnorm)
    except np.linalg.LinAlgError:
        print("Warning: Matrix inversion failed, using regularized inversion")
        # Add a small value to the diagonal for regularization
        reg_matrix = response_matrix_unnorm + np.eye(response_matrix_unnorm.shape[0]) * 1e-6
        mitigated_vector_via_MI = np.ravel(np.matmul(np.linalg.inv(reg_matrix), ymes))
    # Project back onto a valid probability distribution; guard against a
    # zero-sum vector (everything clipped to 0) which would produce NaNs.
    mitigated_vector_via_MI = np.clip(mitigated_vector_via_MI, 0, 1)
    mi_total = np.sum(mitigated_vector_via_MI)
    if mi_total > 0:
        mitigated_vector_via_MI = mitigated_vector_via_MI / mi_total

    # IBU method, starting from a uniform prior
    mitigated_vector_via_IBU = IBU(ymes, np.ones(VECTOR_SIZE) / VECTOR_SIZE, response_matrix_unnorm, IBU_ITERATIONS)
    mitigated_vector_via_IBU = np.clip(mitigated_vector_via_IBU, 0, 1)
    ibu_total = np.sum(mitigated_vector_via_IBU)
    if ibu_total > 0:
        mitigated_vector_via_IBU = mitigated_vector_via_IBU / ibu_total

    # Compare against the ideal 50/50 distribution (computed once, reused)
    true_probs = {'0':0.5, '1':0.5}
    true_vector = measurement_dict_to_vector(LOGICAL_NUM_QUBITS, true_probs)

    # Normalize the raw vector for a fair TVD comparison
    ymes_norm = ymes / np.sum(ymes) if np.sum(ymes) > 0 else ymes
    TVD_via_nothing = TVD(ymes_norm, true_vector)
    TVD_via_MI = TVD(mitigated_vector_via_MI, true_vector)
    TVD_via_IBU = TVD(mitigated_vector_via_IBU, true_vector)

    print(f'task 3 Unnormalized TVD via nothing, shots: {shots}, Total Variation Distance: {TVD_via_nothing:.4f}')
    print(f"task 3 Unnormalized TVD via MI, shots: {shots}, Total Variation Distance: {TVD_via_MI:.4f}")
    print(f"task 3 Unnormalized TVD via IBU, shots: {shots}, Total Variation Distance: {TVD_via_IBU:.4f}")


def _prepare_output_dirs():
    """Create the results/plots output directories.

    Falls back to ``~/quantum_results`` and finally to the current directory
    if creation fails (e.g. read-only filesystem).

    Returns:
        Tuple of (results_dir, plots_dir) paths that exist (or '.', '.').
    """
    results_dir = "results"
    try:
        os.makedirs(results_dir, exist_ok=True)
        plots_dir = os.path.join(results_dir, "plots")
        os.makedirs(plots_dir, exist_ok=True)
        print(f"Created output directories: {results_dir} and {plots_dir}")
        return results_dir, plots_dir
    except Exception as e:
        print(f"Warning: Could not create output directories: {str(e)}")

    # Fallback to user's home directory if needed
    results_dir = os.path.join(os.path.expanduser("~"), "quantum_results")
    plots_dir = os.path.join(results_dir, "plots")
    try:
        os.makedirs(plots_dir, exist_ok=True)
        print(f"Using fallback directories: {results_dir}")
        return results_dir, plots_dir
    except Exception as e2:
        print(f"Warning: Could not create fallback directories: {str(e2)}")

    # Last resort - use current directory
    return ".", "."


def _build_summary(results):
    """Build the human-readable summary text from the collected result frames.

    Args:
        results: Mapping of task name -> DataFrame of TVD columns.

    Returns:
        str: Multi-section summary including per-task statistics and a
        normalized-vs-unnormalized comparison.
    """
    summary = "\nSummary of Results:\n"
    summary += "==================\n"

    # Pairs of (normalized, unnormalized) result names to compare
    grouped_results = {
        'Two Qubit': ['Two Qubit', 'Two Qubit Unnormalized'],
        'Three Qubit': ['Three Qubit', 'Three Qubit Unnormalized']
    }

    # First summarize individual results
    for name, df in results.items():
        summary += f"\n{name} System:\n"
        summary += "-" * (len(name) + 8) + "\n"

        # Normalized multi-qubit tasks use QED as the baseline; the single-qubit
        # and unnormalized tasks use the raw TVD.
        if "Unnormalized" in name or name == "Single Qubit":
            baseline_col = 'TotalVariationDistance_Raw'
        else:
            baseline_col = 'TotalVariationDistance_QED'

        if baseline_col in df.columns:
            baseline = df[baseline_col].mean()
            summary += f"  Baseline: {baseline:.4f}\n"

        if 'TotalVariationDistance_MI' in df.columns:
            mi_val = df['TotalVariationDistance_MI'].mean()
            summary += f"  MI: {mi_val:.4f}\n"
            if baseline_col in df.columns:
                mi_improvement = ((baseline - mi_val) / baseline) * 100
                summary += f"  MI Improvement: {mi_improvement:+.1f}%\n"

        if 'TotalVariationDistance_IBU' in df.columns:
            ibu_val = df['TotalVariationDistance_IBU'].mean()
            summary += f"  IBU: {ibu_val:.4f}\n"
            if baseline_col in df.columns:
                ibu_improvement = ((baseline - ibu_val) / baseline) * 100
                summary += f"  IBU Improvement: {ibu_improvement:+.1f}%\n"

    # Then compare normalized vs unnormalized
    summary += "\n\nNormalized vs Unnormalized Comparison:\n"
    summary += "=====================================\n"

    for group_name, result_names in grouped_results.items():
        if len(result_names) == 2 and all(n in results for n in result_names):
            norm_df = results[result_names[0]]
            unnorm_df = results[result_names[1]]

            summary += f"\n{group_name} Comparison:\n"
            summary += "-" * (len(group_name) + 12) + "\n"

            for method in ('MI', 'IBU'):
                col = f'TotalVariationDistance_{method}'
                if col in norm_df.columns and col in unnorm_df.columns:
                    norm_val = norm_df[col].mean()
                    unnorm_val = unnorm_df[col].mean()
                    # Relative difference; NaN when the normalized value is 0
                    diff = ((norm_val - unnorm_val) / norm_val) * 100 if norm_val != 0 else float('nan')
                    summary += f"  {method} Normalized: {norm_val:.4f}\n"
                    summary += f"  {method} Unnormalized: {unnorm_val:.4f}\n"
                    summary += f"  Difference: {diff:+.1f}%\n"

    return summary


def main():
    """Run all benchmark tasks, then emit comparison plots, LaTeX tables, and
    a text summary under the results directory (with graceful fallbacks)."""
    # Shot counts matching the available experimental data files
    shots = [1000, 5000, 100000]

    results_dir, plots_dir = _prepare_output_dirs()

    # Collect results for each task. Run the single-qubit task once only and
    # reuse its raw TVD as the baseline for the multi-qubit tasks (previously
    # it was run a second time just to re-read the same column).
    results = {}
    results['Single Qubit'] = collect_results(task1, shots, "Single Qubit")
    task1_raw_tvd = results['Single Qubit']['TotalVariationDistance_Raw']

    results['Two Qubit'] = collect_results(task2, shots, "Two Qubit")
    results['Three Qubit'] = collect_results(task3, shots, "Three Qubit")
    results['Two Qubit Unnormalized'] = collect_results(task2_unnormalized, shots, "Two Qubit Unnormalized")
    results['Three Qubit Unnormalized'] = collect_results(task3_unnormalized, shots, "Three Qubit Unnormalized")

    # Add task1 raw TVD to task2 and task3 results as a common baseline
    results['Two Qubit']['TotalVariationDistance_Raw'] = task1_raw_tvd
    results['Three Qubit']['TotalVariationDistance_Raw'] = task1_raw_tvd

    # Generate direct comparison plots (normalized vs unnormalized in the
    # same figure). Save under plots_dir so the directory fallback above is
    # honored (previously the paths were hard-coded to './results/plots').
    try:
        # Single Qubit plot (no unnormalized version)
        plot_statistical_distance_comparison(
            results['Single Qubit'],
            "Single Qubit System TVD Comparison",
            save_path=os.path.join(plots_dir, '1-qubit_comparison'),
            task_name="Single Qubit"
        )

        # Two Qubit comparison
        plot_statistical_distance_comparison(
            results['Two Qubit'],
            "Two Qubit System: Normalized vs Unnormalized Response Matrices",
            save_path=os.path.join(plots_dir, '2-qubit_comparison'),
            task_name="Two Qubit",
            unnormalized_df=results['Two Qubit Unnormalized'],
        )

        # Three Qubit comparison
        plot_statistical_distance_comparison(
            results['Three Qubit'],
            "Three Qubit System: Normalized vs Unnormalized Response Matrices",
            save_path=os.path.join(plots_dir, '3-qubit_comparison'),
            task_name="Three Qubit",
            unnormalized_df=results['Three Qubit Unnormalized']
        )

        print("Generated plots successfully")
    except Exception as e:
        print(f"Error generating plots: {str(e)}")

    # Generate LaTeX tables
    table_path = os.path.join(results_dir, 'results_tables.tex')
    try:
        with open(table_path, 'w') as f:
            for name, df in results.items():
                latex_table = generate_latex_table(df, f"TVD comparison for {name} system")
                f.write(latex_table + '\n\n')
        print(f"LaTeX tables saved to {table_path}")
    except Exception as e:
        print(f"Warning: Could not save LaTeX tables: {str(e)}")
        # Try fallback location
        fallback_path = os.path.join(os.path.expanduser("~"), "Downloads", "results_tables.tex")
        try:
            with open(fallback_path, 'w') as f:
                for name, df in results.items():
                    latex_table = generate_latex_table(df, f"TVD comparison for {name} system")
                    f.write(latex_table + '\n\n')
            print(f"LaTeX tables saved to fallback location: {fallback_path}")
        except Exception as e2:
            print(f"Warning: Could not save LaTeX tables to fallback location: {str(e2)}")

    # Build the summary BEFORE opening any file, so the fallback path below
    # can still write it (previously the fallback referenced a 'summary'
    # variable that was undefined if the primary write failed early).
    summary = _build_summary(results)
    print(summary)  # Print to console

    summary_path = os.path.join(results_dir, 'summary.txt')
    try:
        with open(summary_path, 'w') as f:
            f.write(summary)  # Save to file
        print(f"Summary saved to {summary_path}")
    except Exception as e:
        print(f"Warning: Could not save summary: {str(e)}")
        # Try fallback location
        fallback_path = os.path.join(os.path.expanduser("~"), "Downloads", "summary.txt")
        try:
            with open(fallback_path, 'w') as f:
                f.write(summary)
            print(f"Summary saved to fallback location: {fallback_path}")
        except Exception as e2:
            print(f"Warning: Could not save summary to fallback location: {str(e2)}")

if __name__ == "__main__":
    main()