import numpy as np

class NaiveActor:
    """Reactive proportional controller for grasp force.

    Keeps a two-finger contact state (slip margin ``delta`` and grasp force
    ``fg``) and, on each call to :meth:`act`, nudges the grasp force so that
    the tangential-force ratio tracks ``1 - goal_delta``.
    """

    def __init__(self, goal_delta=0.2, max_df=0.2, kp=4, fg_ub=15.0, fg_lb=1.0):
        """
        Args:
            goal_delta: Target slip margin; the controller regulates the
                tangential-force ratio toward ``1 - goal_delta``.
            max_df: Maximum magnitude of a single grasp-force increment.
            kp: Raw proportional gain; stored pre-scaled by ``1 / (1 - goal_delta)``.
            fg_ub: Upper bound on the commanded grasp force.
            fg_lb: Lower bound on the commanded grasp force.
        """
        self.goal_delta = goal_delta
        self.goal_ratio = 1 - goal_delta
        # Pre-scale the gain so the response is normalized by the goal ratio.
        self.kp = kp / (1 - goal_delta)
        self.fg_ub = fg_ub
        self.fg_lb = fg_lb

        # The maximum grasp force increment that can be applied per step.
        self.max_df = max_df

        self.state = self._initial_state()

    @staticmethod
    def _initial_state():
        """Return a fresh zeroed state dict (one entry per finger/contact)."""
        return {
            "delta": np.zeros(2),  # distance of current contact to macro slip
            "fg": np.zeros(2),     # grasp force at each contact point
        }

    def act(self):
        """
        Select a new grasp force from the current internal state.

        Uses the mean of the stored per-contact grasp forces, applies a
        proportional increment from :meth:`get_delta_fg`, and clamps the
        result to ``[fg_lb, fg_ub]``.

        Returns:
            The new grasp force to apply.
        """
        fg = np.mean(self.state["fg"])
        new_fg = fg + self.get_delta_fg(fg)
        return min(max(new_fg, self.fg_lb), self.fg_ub)

    def get_delta_fg(self, fg):
        """
        Compute the proportional grasp-force increment for the current state.

        Args:
            fg: Current (mean) grasp force.

        Returns:
            Increment proportional to the deviation of the tangential-force
            ratio from ``goal_ratio``, clamped to ``[-max_df, +max_df]``.
        """
        delta = np.mean(self.state["delta"])  # distance of contact to macro slip
        ratio = 1 - delta  # ratio of ft to maximum tangential force

        # Symmetric clamp replaces the original three-way branch: when
        # ratio == goal_ratio the proportional term is exactly zero anyway.
        df = self.kp * (ratio - self.goal_ratio) * fg
        return min(max(df, -self.max_df), self.max_df)

    def update_state(self, gf_id, **kwargs):
        """
        Update the internal state with new per-contact values.

        Args:
            gf_id: Index of the finger/contact to update (for array-valued
                state entries).
            **kwargs: New state values keyed by state name; unknown keys are
                silently ignored.
        """
        for key, value in kwargs.items():
            if key not in self.state:
                continue  # Ignore keys that are not in the state dictionary
            if isinstance(self.state[key], np.ndarray):
                self.state[key][gf_id] = value
            elif isinstance(self.state[key], (int, float)):
                self.state[key] = value

    def restart(self):
        """Reset the internal state of the actor to zeros."""
        self.state = self._initial_state()

    @property
    def params(self):
        """dict: Controller parameters (``kp`` is the pre-scaled gain)."""
        return {"kp": self.kp, "goal_delta": self.goal_delta, "max_df": self.max_df,
                "fg_ub": self.fg_ub, "fg_lb": self.fg_lb}

    def pack_state(self):
        """
        Pack the current state into a flat dictionary of per-contact means.

        Returns:
            dict: Mean ``delta`` and mean ``fg`` across contacts.
        """
        return {
            "delta": self.state["delta"].mean(),
            "fg": self.state["fg"].mean(),
        }

    # --- No-op hooks kept for interface compatibility with learned actors ---

    def pass_exp_data(self):
        """Return experience data; this naive actor collects none."""
        return {}

    def try_store_transition(self, state, done, succeed):
        """No-op: this actor does not store transitions."""
        return

    def save_train_results(self, dir):
        """No-op: nothing to save for a non-learning actor."""
        return

    def save_model(self):
        """No-op: there is no model to save."""
        return

    def train(self):
        """No-op: this actor is not trainable."""
        return