from .naive_actor import NaiveActor
import numpy as np

class PIActor(NaiveActor):
    """Proportional-integral grasp-force controller built on NaiveActor.

    Adds an integral term with gain ``ki`` on top of the proportional
    behavior inherited from NaiveActor, with anti-windup clamping of the
    accumulated error so the I-term alone can never exceed ``max_df``.
    """

    def __init__(self, goal_delta=0.2, max_df=0.4, kp=4, ki=0.03, fg_ub=15.0, fg_lb=1.0):
        """
        Args:
            goal_delta (float): Target slip margin, forwarded to NaiveActor.
            max_df (float): Maximum magnitude of the force change per step.
            kp (float): Proportional gain.
            ki (float): Integral gain. 0 disables the integral term.
            fg_ub (float): Upper grasp-force bound, forwarded to NaiveActor.
            fg_lb (float): Lower grasp-force bound, forwarded to NaiveActor.
        """
        super().__init__(goal_delta, max_df, kp, fg_ub, fg_lb)
        self.ki = ki
        self.integral_error = 0.0
        # Anti-windup bound: caps the accumulator so ki * integral_error
        # contributes at most max_df. Guard ki == 0 (previously raised
        # ZeroDivisionError); with zero gain the I-term is inert, so the
        # bound value is irrelevant and 0.0 is a safe placeholder.
        # NOTE(review): "intergral" is a typo, but the attribute name and
        # the matching key in `params` may be read externally — kept as-is
        # for backward compatibility.
        self.max_intergral_error = max_df / ki if ki != 0 else 0.0

    def restart(self):
        """Reset the actor's internal state, including the integral accumulator."""
        super().restart()
        self.integral_error = 0.0

    def get_delta_fg(self, fg):
        """
        Calculate the change in grasp force from the current contact state.

        The slip margin is read from ``self.state["delta"]`` (mean over
        contact points), not passed as an argument.

        Args:
            fg (float): The current grasp force at the contact point.

        Returns:
            float: The change in grasp force to apply, saturated to
            ``[-max_df, max_df]``.
        """
        # Mean slip margin over contact points; ratio = 1 - delta is the
        # fraction of the way toward macro slip.
        delta = np.mean(self.state["delta"])
        ratio = 1 - delta

        # Force-scaled error term shared by the P and I branches
        # (previously computed twice).
        error = (ratio - self.goal_ratio) * fg

        # Accumulate with anti-windup clamping.
        self.integral_error += error
        self.integral_error = min(
            max(self.integral_error, -self.max_intergral_error),
            self.max_intergral_error,
        )

        # PI control law, saturated at +/- max_df.
        df = self.kp * error + self.ki * self.integral_error
        return min(max(df, -self.max_df), self.max_df)

    @property
    def params(self):
        """Controller parameters, extending those reported by NaiveActor."""
        super_params = super().params
        return {**super_params, "ki": self.ki, "max_intergral_error": round(self.max_intergral_error, 3)}
