import numpy as np

# *************** Analysis Functions ***************
def TVD(p: np.ndarray, q: np.ndarray) -> float:
    """
    Compute the Total Variation Distance between two probability distributions.

    Inputs whose entries do not sum to ~1 are normalized first (falling back
    to a uniform distribution when the sum is not positive), and any negative
    entries are clipped to zero followed by a renormalization. Warnings are
    printed whenever such corrections are applied.

    Args:
        p: First probability distribution
        q: Second probability distribution

    Returns:
        Total Variation Distance between p and q, i.e. 0.5 * sum(|p - q|)
    """
    p_sum = np.sum(p)
    q_sum = np.sum(q)

    # Normalize whenever either input fails the unit-sum check.
    sums_ok = np.isclose(p_sum, 1.0, atol=1e-5) and np.isclose(q_sum, 1.0, atol=1e-5)
    if not sums_ok:
        # A non-positive total mass yields a uniform fallback distribution.
        p = p / p_sum if p_sum > 0 else np.ones_like(p) / len(p)
        q = q / q_sum if q_sum > 0 else np.ones_like(q) / len(q)
        print(f"Warning: Input distributions were normalized. Original sums: p={p_sum:.6f}, q={q_sum:.6f}")

    # Negative entries are invalid probabilities: clip them away, then
    # renormalize so each vector sums to 1 again.
    if np.any(p < 0) or np.any(q < 0):
        p = np.clip(p, 0, 1) / np.sum(np.clip(p, 0, 1))
        q = np.clip(q, 0, 1) / np.sum(np.clip(q, 0, 1))
        print("Warning: Negative probabilities were clipped to 0 and distributions were renormalized")

    # TVD = half the L1 distance between the two distributions.
    return 0.5 * np.sum(np.abs(p - q))

def Accuracy(rule: str, p: np.ndarray, q: np.ndarray) -> float:
    """
    Calculate the accuracy of a rule between two probability distributions.

    Args:
        rule: The rule to use to calculate the accuracy. Currently only
            "TVD" is supported.
        p: The first probability distribution.
        q: The second probability distribution.

    Returns:
        The accuracy of the rule between the two probability distributions,
        in [0, 1]: 1 for identical distributions, 0 for disjoint ones.

    Raises:
        ValueError: If `rule` is not a supported rule name.
    """

    if rule == "TVD":
        # TVD already includes the 1/2 factor and lies in [0, 1], so the
        # complement is simply 1 - TVD. The previous `1 - TVD / 2` halved
        # the distance a second time, compressing accuracy into [0.5, 1]
        # and reporting 0.5 even for completely disjoint distributions.
        return 1 - TVD(p, q)
    else:
        raise ValueError(f"Invalid rule: {rule}")


