from typing import List, Dict
import json

import numpy as np
import pandas as pd


def number_to_bitstring(num_qubits: int, number: int, reverse: bool = False) -> str:
    """
    Convert a non-negative integer to a zero-padded binary bit string.

    Args:
        num_qubits: Target width of the string; the binary representation
            is left-padded with '0' to this length. If ``number`` needs
            more than ``num_qubits`` bits, the result is longer than
            ``num_qubits`` (no truncation is performed).
        number: Non-negative integer to convert.
        reverse: If False (default), the most significant bit is on the
            left (standard binary order). If True, the string is reversed
            so the least significant bit comes first.

    Returns:
        Binary string representation of ``number``.
    """
    # bin() yields MSB-first; zfill pads on the left without truncating.
    bitstring = bin(number)[2:].zfill(num_qubits)
    return bitstring[::-1] if reverse else bitstring

def parse_experiment_results(df: pd.DataFrame) -> List[Dict]:
    """
    Decode the JSON-encoded 'exp_result' column into a list of dicts.

    Args:
        df: DataFrame with an 'exp_result' column holding JSON strings.

    Returns:
        One decoded dictionary per row, in row order.
    """
    parsed = []
    for _, record in df.iterrows():
        parsed.append(json.loads(record['exp_result']))
    return parsed

def construct_response_matrix(groups: List[List[int]], df: pd.DataFrame, normalize: bool = True) -> np.ndarray:
    """
    Construct the readout response matrix for measurement error mitigation.

    A response matrix is built per qubit group from calibration data, and
    the group matrices are combined with a Kronecker product in the order
    the groups are given.

    Args:
        groups: List of qubit groups; each group contributes a block of
            dimension 2**len(group).
        df: Calibration data with a 'qubits' column holding the
            JSON-encoded group list and an 'exp_result' column holding a
            JSON-encoded dict of measured bitstring -> probability.
            NOTE(review): within each group, the DataFrame index is
            interpreted as the prepared (true) basis-state index, so it
            must run 0 .. 2**len(group)-1 per group — confirm upstream.
        normalize: If True, divide each column of the combined matrix by
            its sum. Columns summing to zero are left untouched (avoids
            division by zero; previously such columns became NaN).

    Returns:
        Response matrix R of shape (D, D) with D = prod(2**len(g)).

    Raises:
        ValueError: If ``groups`` is empty.
    """
    if not groups:
        raise ValueError("groups must contain at least one qubit group")

    response_matrices = []
    for group in groups:
        dim = 2 ** len(group)
        R = np.zeros((dim, dim))

        # Calibration rows recorded for exactly this group.
        group_data = df.loc[df['qubits'] == json.dumps(group)]

        for true_idx, row in group_data.iterrows():
            # The prepared state indexes the column; the measured
            # bitstring indexes the row.
            results = json.loads(row['exp_result'])
            for measured, prob in results.items():
                R[int(measured, 2), true_idx] = prob

        response_matrices.append(R)

    # Combine the per-group matrices with a Kronecker product.
    final_R = response_matrices[0]
    for R in response_matrices[1:]:
        final_R = np.kron(final_R, R)

    if normalize:
        for col in range(final_R.shape[1]):
            col_sum = np.sum(final_R[:, col])
            if col_sum > 0:  # guard, consistent with the logical variant
                final_R[:, col] = final_R[:, col] / col_sum

    return final_R

def construct_logical_response_matrix(df: pd.DataFrame, mapping: Dict[str, str], normalize: bool = True) -> np.ndarray:
    """
    Build the response matrix in the logical (encoded) basis.

    Args:
        df: Calibration results. The row index is interpreted as the
            prepared physical basis-state index, and 'exp_result' holds a
            JSON dict of measured bitstring -> probability.
        mapping: Physical bitstring -> logical bitstring. The logical
            register size is derived as len(mapping) // 2.
            NOTE(review): that derivation, and the fact that entries are
            assigned rather than accumulated when several physical states
            map to one logical state, both assume a specific mapping
            structure — confirm against the caller.
        normalize: If True, divide each column by its sum; zero columns
            are left untouched to avoid division by zero.

    Returns:
        Logical response matrix of shape (2**k, 2**k), k = len(mapping)//2.
    """
    logical_dim = 2 ** (len(mapping) // 2)
    R_logical = np.zeros((logical_dim, logical_dim))
    # Physical register width, taken from any key of the mapping.
    physical_width = len(list(mapping)[0])

    for prepared_idx, record in df.iterrows():
        # Physical bitstring prepared for this calibration row
        # (MSB-first, zero-padded to the physical width).
        prepared = bin(prepared_idx)[2:].zfill(physical_width)
        logical_prepared = mapping.get(prepared)
        if logical_prepared is None:
            # Prepared state lies outside the code space; skip the row.
            continue
        column = int(logical_prepared, 2)

        for outcome, prob in json.loads(record['exp_result']).items():
            logical_outcome = mapping.get(outcome)
            if logical_outcome is not None:
                R_logical[int(logical_outcome, 2), column] = prob

    if normalize:
        for col in range(logical_dim):
            col_total = np.sum(R_logical[:, col])
            if col_total > 0:  # Avoid division by zero
                R_logical[:, col] = R_logical[:, col] / col_total

    return R_logical

def construct_measured_array(num_qubits: int, measurement_df: pd.DataFrame, mapping: Dict[str, str] = None) -> np.ndarray:
    """
    Construct the normalized measured probability vector for mitigation.

    Args:
        num_qubits: Number of physical qubits; sets the vector length
            (2**num_qubits) when no mapping is given.
        measurement_df: DataFrame whose 'exp_result' column holds a JSON
            dict of measured bitstring -> probability per row.
        mapping: Optional physical bitstring -> logical bitstring map.
            When given, the vector lives in the logical space of size
            2**(len(mapping) // 2) and unmapped outcomes are dropped.

    Returns:
        Probability vector normalized to sum to 1 (returned as-is when
        the total is zero, avoiding division by zero).
    """
    if mapping is None:
        probs = np.zeros(2 ** num_qubits)
        for _, record in measurement_df.iterrows():
            for outcome, prob in json.loads(record['exp_result']).items():
                probs[int(outcome, 2)] = prob
    else:
        # Logical-space vector; size is derived from the mapping.
        probs = np.zeros(2 ** (len(mapping) // 2))
        for _, record in measurement_df.iterrows():
            for outcome, prob in json.loads(record['exp_result']).items():
                logical_outcome = mapping.get(outcome)
                if logical_outcome is not None:
                    probs[int(logical_outcome, 2)] = prob

    total = np.sum(probs)
    if total > 0:  # Avoid division by zero
        probs = probs / total

    return probs

def normolize_vector(vector: np.ndarray, index: list) -> np.ndarray:
    """
    Select entries of ``vector`` at the given indices and renormalize
    them to sum to one.

    NOTE: the function name contains a typo ("normolize"); it is kept
    unchanged for backward compatibility with existing callers.

    Args:
        vector: Source vector to select from.
        index: Indices of the entries to keep, in output order.

    Returns:
        The selected entries as a float array, divided by their sum.
        If the selected entries sum to zero they are returned
        unnormalized (avoids division by zero, consistent with the
        other normalization helpers in this module; previously this
        produced NaNs).
    """
    # Fancy indexing replaces the element-by-element copy loop; it also
    # returns a fresh array, matching the original's behavior.
    selected = np.asarray(vector, dtype=float)[list(index)]
    total = np.sum(selected)
    return selected / total if total > 0 else selected