import numpy as np

class PrivacyPreserver:
    """Differential-privacy helper based on the Laplace mechanism.

    Adds calibrated Laplace noise to scalar trust/reputation values and
    clips the result into a fixed output range. Clipping is a
    post-processing step, so it does not weaken the (ε, 0)-DP guarantee.

    Attributes:
        epsilon: Privacy budget ε (smaller = stronger privacy).
        sensitivity: Query sensitivity ΔR (Eq. 3).
        clip_min: Lower bound of the clipped output range.
        clip_max: Upper bound of the clipped output range.
    """

    def __init__(self, epsilon=1.0, sensitivity=1.0,
                 clip_min=0.0, clip_max=100.0):
        """
        Args:
            epsilon: Privacy budget ε; must be strictly positive.
            sensitivity: Sensitivity ΔR (Eq. 3); must be strictly positive.
            clip_min: Lower clip bound (default 0.0, matches prior behavior).
            clip_max: Upper clip bound (default 100.0, matches prior behavior).
        Raises:
            ValueError: If epsilon or sensitivity is <= 0, or if
                clip_min > clip_max.
        """
        # Fail fast here instead of a ZeroDivisionError (epsilon == 0) or a
        # negative noise scale surfacing later inside laplace_noise().
        if epsilon <= 0:
            raise ValueError("epsilon must be strictly positive")
        if sensitivity <= 0:
            raise ValueError("sensitivity must be strictly positive")
        if clip_min > clip_max:
            raise ValueError("clip_min must not exceed clip_max")
        self.epsilon = epsilon  # Privacy budget
        self.sensitivity = sensitivity  # ΔR (Eq.3)
        self.clip_min = clip_min
        self.clip_max = clip_max

    def __repr__(self) -> str:
        return (f"{type(self).__name__}(epsilon={self.epsilon!r}, "
                f"sensitivity={self.sensitivity!r})")

    def laplace_noise(self, data: float) -> float:
        """
        Apply differential privacy via Laplace mechanism (Algorithm 3).

        Args:
            data: Original trust value.
        Returns:
            Noised output preserving (ε,0)-DP, clipped to
            [clip_min, clip_max] (defaults to [0, 100]).
        """
        scale = self.sensitivity / self.epsilon  # Laplace scale b = ΔR / ε
        noise = np.random.laplace(0.0, scale)
        # Clipping is post-processing and therefore DP-preserving.
        return max(self.clip_min, min(data + noise, self.clip_max))

    def adaptive_epsilon(self, reputation: float) -> float:
        """
        Dynamic privacy budget allocation (Sec 3.5.2).

        Args:
            reputation: Current trust score R_i.
        Returns:
            Context-aware epsilon value.
        """
        # High-trust: relaxed privacy (larger ε), Low-trust: strict privacy.
        return 2.0 if reputation > 70 else 0.5
