import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from .naive_actor import NaiveActor
from .PPO_trainer import PPO_Trainer, PPO_Trainer_Batched
import pandas as pd


class RLForceController(nn.Module):
    """Gaussian policy head that proposes bounded grip-force updates.

    Maps a state vector to Normal(mu, sigma) over a force delta, samples it
    with the reparameterization trick, then clamps both the per-step change
    and the resulting absolute force to their configured ranges.
    """

    def __init__(self, state_dim, f_min=1, f_max=15, delta_f_max=1.5):
        super().__init__()
        self.f_min = f_min              # lower bound on absolute grip force
        self.f_max = f_max              # upper bound on absolute grip force
        self.delta_f_max = delta_f_max  # per-step force-change limit
        self.state_dim = state_dim

        # Policy network: maps state to the two distribution parameters
        # (mu, log sigma).
        self.policy_net = nn.Sequential(
            nn.Linear(state_dim, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, 2)
        )

    def forward(self, state: torch.Tensor):
        """Sample a force update for each state in the batch.

        Args:
            state: (batch, state_dim) tensor; column 1 is assumed to hold the
                current grip force — TODO confirm against callers.

        Returns:
            (safe_fg, delta_fg, log_prob): the clamped new force, the change
            actually applied after clamping, and the log-probability of the
            raw sampled action.
        """
        fg_now = state[:, 1]
        params = self.policy_net(state)
        mu, log_sigma = params[:, 0], params[:, 1]
        sigma = torch.exp(log_sigma) + 1e-6  # small floor keeps sigma strictly positive

        # Batched Gaussian over the proposed force delta.
        policy = torch.distributions.Normal(mu, sigma)
        raw_delta = policy.rsample()  # reparameterized sample so gradients flow through

        # Limit the per-step change, then enforce the absolute force range.
        bounded_delta = torch.clamp(raw_delta, -self.delta_f_max, self.delta_f_max)
        safe_fg = torch.clamp(fg_now + bounded_delta, self.f_min, self.f_max)

        # NOTE(review): the log-prob is taken for the raw (unclipped) sample,
        # while the delta returned below may differ after clamping — confirm
        # this is intended for the PPO update.
        log_prob = policy.log_prob(raw_delta)

        delta_fg = safe_fg - fg_now  # change effectively applied after all clamps
        return safe_fg, delta_fg, log_prob


class GripperCritic(nn.Module):
    """Value network: estimates the scalar state value V(s)."""

    def __init__(self, state_dim):
        super().__init__()
        layers = [
            nn.Linear(state_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 1),  # single scalar value per state
        ]
        self.value_net = nn.Sequential(*layers)

    def forward(self, state):
        """Return V(s) with shape (batch, 1)."""
        return self.value_net(state)

class RewardFunction:
    """Shaped reward for grip-force tracking control.

    Combines a contact-distance tracking penalty, an improvement-direction
    term, a smoothness penalty, a boundary-proximity penalty, and a terminal
    success bonus.
    """

    def __init__(self, f_min=1, f_max=15, delta_g=0.2):
        self.f_min = f_min      # lower grip-force bound
        self.f_max = f_max      # upper grip-force bound
        self.delta_g = delta_g  # target contact distance

    def compute_reward(self, state, next_state, action, successed=False):
        """Compute the shaped reward for one transition.

        Args:
            state: 6-tuple (delta, fg, ccx, ccy, Ft, dFt) before the action.
            next_state: 6-tuple with the same layout after the action.
            action: grip-force change applied this step.
            successed: True when the episode ended with a successful grasp.

        Returns:
            Tuple (total_reward, r_tracking, r_tracking_2): the summed reward
            plus the two tracking components, exposed separately for logging.
        """
        # Unpack only the fields actually used; underscore names are unused.
        delta, current_fg, _ccx, _ccy, _Ft, _dFt = state
        _next_delta, next_fg, _, _, _next_Ft, _next_dFt = next_state

        # Primary tracking term: quadratic penalty on distance-to-target,
        # weighted harder when below the target contact distance.
        tracking_error = delta - self.delta_g
        if tracking_error > 0:
            r_tracking = -10.0 * tracking_error**2
        else:
            r_tracking = -30.0 * tracking_error**2

        # Reward force changes that move in the error-reducing direction.
        r_tracking_2 = -15.0 * (tracking_error * (next_fg - current_fg))

        # Penalize large force changes to encourage smooth control.
        r_smooth = -0.01 * (action**2)

        # Boundary-safety penalty ramping up inside a small margin of either
        # force bound (3% of the full force range).
        boundary_margin = 0.03 * (self.f_max - self.f_min)
        if (next_fg < self.f_min + boundary_margin) or (next_fg > self.f_max - boundary_margin):
            r_boundary = -0.5 * (1.0 - (min(next_fg - self.f_min, self.f_max - next_fg) / boundary_margin))
        else:
            r_boundary = 0.0

        # Terminal bonus for a successful grasp.
        r_success = 5.0 if successed else 0.0

        return r_tracking + r_tracking_2 + r_smooth + r_boundary + r_success, r_tracking, r_tracking_2




class RLActor(NaiveActor):
    """PPO-trained grip-force actor combining a Gaussian policy and a critic.

    Maintains per-finger sensor readings, packs them into a 6-dim
    observation, samples force commands from RLForceController, and feeds
    reward-labelled transitions to a batched PPO trainer.
    """

    def __init__(
        self, state_dim=6, f_min=1, f_max=15, delta_f_max=0.6, goal_delta=0.2, model_path="actor_critic/rl_actor.pth"
    ):
        """Initializes the RLActor with the given parameters.

        Args:
            state_dim: dimension of the packed observation vector.
            f_min: lower bound on absolute grip force.
            f_max: upper bound on absolute grip force.
            delta_f_max: per-step force-change limit passed to the policy.
            goal_delta: target contact distance used by the reward function.
            model_path: optional checkpoint path to restore models from.
        """
        self.state_dim = state_dim
        self.f_min = f_min
        self.f_max = f_max
        self.delta_f_max = delta_f_max
        self.goal_delta = goal_delta  # target contact distance
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Policy and value networks plus the PPO trainer that updates them.
        self.actor = RLForceController(state_dim, f_min, f_max, delta_f_max).to(self.device)
        self.critic = GripperCritic(state_dim).to(self.device)
        self.ppo_trainer = PPO_Trainer_Batched(self.actor, self.critic, self.device, n_episodes_per_update = 4)
        self.reward_function = RewardFunction(f_min, f_max, delta_g=self.goal_delta)

        # Restore a previous checkpoint when one is available.
        if model_path:
            try:
                # NOTE(review): torch.load unpickles arbitrary objects; only
                # load checkpoints produced by this code (see save_model).
                checkpoint = torch.load(model_path, map_location=self.device)
                self.actor.load_state_dict(checkpoint["actor_state_dict"])
                self.critic.load_state_dict(checkpoint["critic_state_dict"])
                # Trainer state is persisted as its raw __dict__ in save_model.
                self.ppo_trainer.__dict__.update(checkpoint["ppo_trainer_state"])
            except FileNotFoundError:
                print(f"Model file {model_path} not found. Initializing new models.")
        else:
            print("No model path provided, initializing new models.")

        # Per-finger sensor state (index 0/1 = finger id).
        self.state = self._initial_state()

        # Pieces of the pending transition, filled by act().
        self.last_state = None     # observation at the previous act() call
        self.last_action = None    # applied force delta from the previous act()
        self.last_log_prob = None  # log-prob of the previous sampled action

        # Per-episode logs, cleared by restart().
        self.action_list = []
        self.reward_list = []
        self.tracking_reward_list = []
        self.tracking_reward_2_list = []

        # Per-update training metrics, appended by train().
        self.results = {
            "actor_loss": [],
            "critic_loss": [],
            "entropy_bonus": []
        }

    def _initial_state(self):
        """Return a fresh per-finger sensor-state dict (2 fingers per channel)."""
        return {
            "delta": np.zeros(2),  # distance from the current contact to macro slip
            "fg": np.zeros(2),     # grip force
            "ccx": np.zeros(2),    # contact center x position
            "ccy": np.zeros(2),    # contact center y position
            "ft": np.zeros(2),     # tangential force at the contact point
            "dft": np.zeros(2),    # tangential force increment at the contact point
        }

    def try_store_transition(self, state, done, successed):
        """Score and store the pending (last_state -> state) transition.

        Args:
            state: current packed observation tuple.
            done: True when the episode has terminated.
            successed: True when the grasp succeeded (bonus only when also done).

        Returns:
            The computed reward, or None when no prior action is pending.
        """
        if self.last_state is None or self.last_action is None or self.last_log_prob is None:
            return None
        reward, tracking_reward, tracking_reward2 = self.reward_function.compute_reward(
            self.last_state, state, self.last_action, done and successed
        )
        self.action_list.append(self.last_action)
        self.reward_list.append(reward)
        self.tracking_reward_list.append(tracking_reward)
        self.tracking_reward_2_list.append(tracking_reward2)

        self.ppo_trainer.store_transition(self.last_state, self.last_action, self.last_log_prob, reward, state, done)
        return reward

    def act(self):
        """Sample a grip-force command from the policy for the current state.

        Returns:
            The clamped grip force (float) to apply.
        """
        state = self.pack_state()
        state_tensor = torch.tensor(state, dtype=torch.float32).unsqueeze(0).to(self.device)  # add batch dim
        safe_fg, delta_fg, log_prob = self.actor(state_tensor)
        fg = safe_fg.item()
        # Remember what is needed to build the transition on the next step.
        self.last_state = state
        self.last_action = delta_fg.item()
        self.last_log_prob = log_prob.item()

        return fg

    def train(self):
        """Run one PPO update if the trainer is ready; checkpoint and log metrics."""
        ret, result = self.ppo_trainer.update()
        if ret:
            self.save_model()
            self.results["actor_loss"].append(result[0])
            self.results["critic_loss"].append(result[1])
            self.results["entropy_bonus"].append(result[2])

    def update_state(self, gf_id, **kwargs):
        """Updates the internal state of the actor with new values.

        Args:
            gf_id: finger index addressed by array-valued channels.
            **kwargs: Keyword arguments containing the new state values;
                keys not tracked in the state dict are ignored.
        """
        for key, value in kwargs.items():
            if key not in self.state:
                continue  # ignore keys that are not in the state dictionary
            if isinstance(self.state[key], np.ndarray):
                self.state[key][gf_id] = value
            elif isinstance(self.state[key], (int, float)):
                self.state[key] = value

    def restart(self):
        """Resets the sensor state, pending transition, and per-episode logs."""
        self.state = self._initial_state()
        self.last_state = None
        self.last_action = None
        self.last_log_prob = None
        self.action_list.clear()
        self.reward_list.clear()
        self.tracking_reward_list.clear()
        self.tracking_reward_2_list.clear()

    @property
    def params(self):
        """Actor hyperparameters merged with the PPO trainer's parameters."""
        all_params = {
            "f_min": self.f_min,
            "f_max": self.f_max,
            "delta_f_max": self.delta_f_max,
            "goal_delta": self.goal_delta,
        }
        all_params.update(self.ppo_trainer.params)
        return all_params

    def pack_state(self):
        """Packs the current state into a tuple for processing.

        Returns:
            A 6-tuple of per-channel means over the two fingers, in the
            order expected by the policy and reward function.
        """
        return (
            np.mean(self.state["delta"]),
            np.mean(self.state["fg"]),
            np.mean(self.state["ccx"]),
            np.mean(self.state["ccy"]),
            np.mean(self.state["ft"]),
            np.mean(self.state["dft"]),
        )

    def save_model(self, path=r"actor_critic/rl_actor.pth"):
        """Saves the actor and critic models to a file.

        Args:
            path (str): The file path to save the models.
        """
        torch.save(
            {
                "actor_state_dict": self.actor.state_dict(),
                "critic_state_dict": self.critic.state_dict(),
                # NOTE(review): pickling the raw trainer __dict__ also stores
                # its optimizer/network references; an explicit state dict
                # would be more robust across code changes.
                "ppo_trainer_state": self.ppo_trainer.__dict__,
            },
            path,
        )

    def pass_exp_data(self):
        """Expose the per-episode logs for external analysis."""
        return {
            "action_list": self.action_list,
            "reward_list": self.reward_list,
            "tracking_reward_list": self.tracking_reward_list,
            "tracking_reward_2_list": self.tracking_reward_2_list,
        }

    def save_train_results(self, dir):
        """Write accumulated training metrics to <dir>/agent_train_result.csv."""
        # `dir` shadows the builtin; name kept for caller compatibility.
        filename = f"{dir}/agent_train_result.csv"
        df = pd.DataFrame(self.results)
        df.to_csv(filename)