# -*- coding: utf-8 -*-

from rl_typing import *
from abc import abstractmethod

'''Algorithm definitions'''
import os
import numpy as np
import torch as th
import torch.nn as nn
from torch.nn import functional as F
from copy import deepcopy
import time

state_number = 7   # observation vector dimension fed to the actor/critic MLPs
action_number = 2  # action vector dimension
MAX_EPISODE = 1000 # number of training episodes
LEARN_FREQ = 10    # NOTE(review): defined but not referenced in this file
OUTPUT_FREQ = 50   # plot the environment every N episodes
lr_actor = 3e-4    # actor learning rate (lowered)
lr_critic = 1e-3   # critic learning rate (lowered)
lr_alpha = 3e-4    # temperature (alpha) learning rate
gamma = 0.99       # discount factor (increased)
tau = 5e-3         # soft-update coefficient (lowered)

UpdateAfter = 2000       # start learning once the buffer holds this many transitions
MemoryCapacity = 100000  # replay buffer capacity
batch_size = 128         # mini-batch size for SAC updates

class BaseBuffer:
    """Replay-buffer base class; concrete tasks override the abstract hooks."""

    obs_space: ObsSpace         # observation space descriptor
    act_space: ActSpace         # action space descriptor
    device: DeviceLike = 'cpu'  # device the sampled tensors are placed on

    # 0. reset
    @abstractmethod
    def reset(self, *args, **kwargs):
        """Reset the replay buffer."""
        raise NotImplementedError

    @property
    def is_rnn(self) -> bool:
        """Whether this is an RNN (episode-level) replay buffer."""
        return False

    @property
    def nbytes(self) -> int:
        """Memory footprint of the buffer in bytes."""
        return 0

    # 1. store
    @abstractmethod
    def push(
        self,
        transition: tuple[Obs, Act, float, Obs, bool],
        terminal: bool = None,
        **kwargs
    ):
        """Store a single transition.\n
            transition = (state, action, reward, next_state, done)
            terminal is used to control DRQN-style EPISODE REPLAY
        """
        raise NotImplementedError

    def __len__(self) -> int:
        """Current number of stored transitions."""
        return 0

    def extend(
        self,
        transition_list: list[tuple[Obs, Act, float, Obs, bool]],
        terminal_list: list[bool] = None,
        **kwargs
    ):
        """Store a batch of transitions.\n
            extend(List[(state, action, reward, next_state, done)], List[terminal])
        """
        # BUGFIX: the default terminal_list=None crashed in zip() with a
        # TypeError; treat it as "no terminal flags" and forward None per item.
        if terminal_list is None:
            terminal_list = [None] * len(transition_list)
        for transition, terminal in zip(transition_list, terminal_list):
            self.push(transition, terminal)

    # 2. sample
    @abstractmethod
    def sample(
        self,
        batch_size: int = 1,
        *,
        idxs: ListLike = None,
        rate: float = None,
        **kwargs,
    ) -> dict[str, Union[ObsBatch, ActBatch, th.FloatTensor]]:
        """Randomly sample a batch.

        Args
        ----------
        batch_size : int, optional
            Number of transitions to draw, default 1.

        KwArgs
        ----------
        idxs : ListLike, optional
            Explicit sample indices (batch_size is then ignored); when None,
            indices are drawn at random. Default None.
        rate : float, optional
            Used to anneal the PER parameter beta, default None.
            rate = learn_steps / max_train_steps
            beta = beta0 + (1-beta0) * rate

        Returns
        -------
        Dict[str, Union[ObsBatch, ActBatch, th.FloatTensor]]
            GPU Tensor/MixedTensor batch keyed "s", "a", "r", "s_", "done", "IS_weight", ...
        """
        raise NotImplementedError

    def __getitem__(self, index):
        """Index into the buffer.\n
           batch = buffer[index] is equivalent to batch = buffer.sample(idxs=index)
        """
        if isinstance(index, int): index = [index]
        return self.sample(idxs=index)

    # 3. PER support
    def update_priorities(self, td_errors: np.ndarray):
        """Update PER priorities from TD errors (no-op for uniform buffers)."""
        pass

    @property
    def is_per(self) -> bool:
        """Whether this buffer uses prioritized experience replay."""
        return False

    # 4. single-Obs conversion
    @abstractmethod
    def state_to_tensor(self, state: Obs, use_rnn=False) -> ObsBatch:
        """Called by the algorithm's select_action/export interfaces to turn
        one state into a batch_size=1 tensor.
        use_rnn = False : (*state_shape, ) -> (1, *state_shape)
        use_rnn = True : (*state_shape, ) -> (1, 1, *state_shape)
        """
        raise NotImplementedError
        # TODO to support mixed action spaces, define an action_to_numpy method

    # 5. I/O
    def save(self, data_dir: PathLike, buffer_id: Union[int, str] = None):
        """Persist the buffer into data_dir / buffer_id (or data_dir)."""
        pass

    def load(self, data_dir: PathLike, buffer_id: Union[int, str] = None):
        """Load the buffer from data_dir / buffer_id (or data_dir)."""
        pass

    # 6. PyTorch-style device control
    def to(self, device: DeviceLike):
        """Place sampled tensors on `device`."""
        self.device = device
        return self

    def cuda(self, cuda_id=None):
        """Place sampled tensors on CUDA (falls back to CPU when unavailable)."""
        device = 'cpu' if not th.cuda.is_available() else 'cuda' if cuda_id is None else 'cuda:' + str(cuda_id)
        self.to(device)
        return self

    def cpu(self):
        """Place sampled tensors on CPU."""
        self.to('cpu')
        return self

# 1. Experience replay buffer (depends on the observation/action data structures)
class Buffer(BaseBuffer):
    """Uniform-sampling replay buffer backed by pre-allocated numpy arrays."""

    def __init__(self, memory_size, obs_space, act_space):
        super(Buffer, self).__init__()
        # BUGFIX: was hard-coded to 'cuda', which crashes on CPU-only machines.
        self.device = 'cuda' if th.cuda.is_available() else 'cpu'
        self.obs_space = obs_space
        self.act_space = act_space
        # bookkeeping
        self._ptr = 0    # next write position (circular)
        self._idxs = [0] # indices used by the last sample() call (PER hook), 1-D list or ndarray
        self._memory_size = int(memory_size) # total capacity
        self._current_size = 0               # number of stored transitions
        # pre-allocated storage (use the int-coerced capacity consistently)
        self._data = {}
        self._data["s"] = np.empty((self._memory_size, *obs_space.shape), dtype=obs_space.dtype)
        self._data["a"] = np.empty((self._memory_size, *act_space.shape), dtype=act_space.dtype)
        self._data["r"] = np.empty((self._memory_size, 1), dtype=np.float32)
        self._data["s_"] = deepcopy(self._data["s"])
        self._data["done"] = np.empty((self._memory_size, 1), dtype=bool)

    def reset(self, *args, **kwargs):
        """Empty the buffer (storage is reused, not freed)."""
        self._ptr = 0
        self._idxs = [0]
        self._current_size = 0

    @property
    def nbytes(self):
        """Total memory footprint of the numpy storage in bytes."""
        return sum(x.nbytes for x in self._data.values())

    def push(self, transition, terminal=None, **kwargs):
        """Store one (s, a, r, s_, done) transition, overwriting the oldest slot when full."""
        self._data["s"][self._ptr] = transition[0]
        self._data["a"][self._ptr] = transition[1]
        self._data["r"][self._ptr] = transition[2]
        self._data["s_"][self._ptr] = transition[3]
        self._data["done"][self._ptr] = transition[4]
        # advance the circular pointer and grow the size up to capacity
        self._ptr = (self._ptr + 1) % self._memory_size
        self._current_size = min(self._current_size + 1, self._memory_size)

    def __len__(self):
        """Current number of stored transitions."""
        return self._current_size

    def sample(self, batch_size=1, *, idxs=None, rate=None, **kwargs):
        """Sample a batch of transitions as float32 tensors on self.device.

        BUGFIX: the original `idxs or np.random.choice(...)` raises a
        "truth value is ambiguous" error when idxs is a non-empty numpy
        array (as PER code passes) and silently discards an explicitly
        passed empty index list; test `idxs is None` instead.
        """
        if idxs is None:
            idxs = np.random.choice(self._current_size, size=batch_size, replace=False)
        self._idxs = idxs
        # advanced indexing copies, so as_tensor never aliases the ring storage
        return {
            key: th.as_tensor(arr[idxs], dtype=th.float32, device=self.device)
            for key, arr in self._data.items()
        }

# 2. Neural networks (depend on the observation data structure)
# Mixed-observation encoder
class EncoderNet(nn.Module):
    """Mixed-observation encoder.

    Encodes the point-cloud ranging channel with a 1-D CNN and the
    state-vector sequence with a GRU, then fuses both branches into a
    single feature vector of width `feature_dim`.
    """

    def __init__(self, obs_space, feature_dim):
        super(EncoderNet, self).__init__()
        # --- point-cloud branch (1-D CNN) ---
        n_channels, n_points = obs_space['seq_points'].shape
        first_kernel = min(n_points // 2, 8)
        first_stride = min(n_points - first_kernel, 4)
        self.cnn = nn.Sequential(
            nn.Conv1d(n_channels, 32, kernel_size=first_kernel, stride=first_stride, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv1d(32, 64, kernel_size=4, stride=2, padding=0),
            nn.ReLU(inplace=True),
            nn.Conv1d(64, 64, kernel_size=3, stride=1, padding=0),
            nn.ReLU(inplace=True),
            nn.Flatten(),
        )
        flat_dim = self._get_cnn_out_dim(self.cnn, (n_channels, n_points))
        self.cnn_mlp = nn.Sequential(
            nn.Linear(flat_dim, feature_dim),
            nn.ReLU(True),
        )
        # --- state-vector branch (GRU over the sequence) ---
        _, vec_dim = obs_space['seq_vector'].shape
        gru_hidden = 256
        gru_layers = 1
        self.rnn_mlp1 = nn.Sequential(
            nn.Linear(vec_dim, gru_hidden),
            nn.ReLU(True),
        )
        self.rnn = nn.GRU(gru_hidden, gru_hidden, gru_layers, batch_first=True)
        self.rnn_mlp2 = nn.Sequential(
            nn.Linear(gru_hidden, feature_dim),
            nn.ReLU(True),
        )
        # --- fusion head ---
        self.fusion = nn.Sequential(
            nn.Linear(2 * feature_dim, feature_dim),
            nn.ReLU(True),
        )

    def forward(self, obs):
        """Return the fused (batch, feature_dim) embedding of a mixed observation."""
        point_feature = self.cnn_mlp(self.cnn(obs['seq_points']))       # (batch, feature_dim)
        seq_out, _ = self.rnn(self.rnn_mlp1(obs['seq_vector']), None)   # (batch, seq, hidden)
        vector_feature = self.rnn_mlp2(seq_out[:, -1, :])               # last step -> (batch, feature_dim)
        return self.fusion(th.cat([point_feature, vector_feature], dim=-1))

    @staticmethod
    def _get_cnn_out_dim(cnn: nn.Module, input_shape: tuple[int, ...]):
        """Infer the flattened CNN output width with a dry run on a zero tensor."""
        # out_dim = (in_dim + 2*pad - dilation*(k_size-1) - 1) / stride + 1
        probe = deepcopy(cnn).to('cpu')
        return int(np.prod(probe(th.zeros(1, *input_shape)).size()))


class Ornstein_Uhlenbeck_Noise:
    """Ornstein-Uhlenbeck process for temporally correlated exploration noise.

    x_{t+1} = x_t + theta*(mu - x_t)*dt + sigma*sqrt(dt)*N(0, 1)
    """

    def __init__(self, mu, sigma=0.1, theta=0.1, dt=1e-2, x0=None):
        self.mu = mu        # long-run mean the process reverts to
        self.theta = theta  # mean-reversion rate
        self.sigma = sigma  # diffusion scale
        self.dt = dt        # integration time step
        self.x0 = x0        # optional initial state (zeros when None)
        self.reset()

    def __call__(self):
        """Advance the process by one step and return the new noise sample."""
        drift = self.theta * (self.mu - self.x_prev) * self.dt
        diffusion = self.sigma * np.sqrt(self.dt) * np.random.normal(size=self.mu.shape)
        self.x_prev = self.x_prev + drift + diffusion
        return self.x_prev

    def reset(self):
        """Restart the process from x0 (or zeros when x0 is None)."""
        self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)

# Policy network
class PiNet(nn.Module):
    """Gaussian policy head: maps a state to (tanh-squashed mean, std)."""

    def __init__(self, input_dim, output_dim):
        super(PiNet, self).__init__()
        # Shared two-layer trunk; LayerNorm for training stability.
        self.net = nn.Sequential(
            nn.Linear(input_dim, 256),
            nn.LayerNorm(256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.LayerNorm(256),
            nn.ReLU(),
        )
        self.mu = nn.Linear(256, output_dim)       # mean head
        self.log_std = nn.Linear(256, output_dim)  # log-std head

        # Orthogonal weight init (gain sqrt(2)) with zero biases on all Linears.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
                nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Return (mu, std): mu squashed to [-1, 1], log-std clamped to [-20, 2]."""
        hidden = self.net(x)
        mean = th.tanh(self.mu(hidden))
        std = th.clamp(self.log_std(hidden), -20, 2).exp()
        return mean, std


# Q network
class QNet(nn.Module):
    """Twin Q-heads over a shared state-action trunk (for clipped double-Q)."""

    def __init__(self, input_dim, output_dim):
        super(QNet, self).__init__()
        # Shared trunk over the concatenated (state, action) vector.
        self.feature = nn.Sequential(
            nn.Linear(input_dim + output_dim, 256),
            nn.LayerNorm(256),
            nn.ReLU(),
            nn.Linear(256, 256),
            nn.LayerNorm(256),
            nn.ReLU(),
        )

        # Independent scalar heads producing the two Q estimates.
        self.q1 = nn.Linear(256, 1)
        self.q2 = nn.Linear(256, 1)

        # Orthogonal weight init (gain sqrt(2)) with zero biases on all Linears.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.orthogonal_(module.weight, gain=np.sqrt(2))
                nn.init.constant_(module.bias, 0)

    def forward(self, s, a):
        """Return (q1, q2) for the given state-action batch."""
        feats = self.feature(th.cat([s, a], dim=1))
        return self.q1(feats), self.q2(feats)

# Actor
class SAC_Actor():
    """SAC actor: owns the Gaussian policy network and its optimizer."""

    def __init__(self, lr_actor):
        self.action_net = PiNet(state_number, action_number)
        self.optimizer = th.optim.Adam(self.action_net.parameters(), lr=lr_actor)
        self.device = 'cuda' if th.cuda.is_available() else 'cpu'
        # BUGFIX: self.device could be 'cuda' while the network stayed on CPU,
        # so choose_action() fed CUDA inputs to CPU weights until .cuda() was
        # called; place the network on the chosen device up front.
        self.action_net.to(self.device)

    def to(self, device: DeviceLike):
        """Set the actor's device and move the policy network onto it."""
        self.device = device
        self.action_net.to(device)
        return self

    def cuda(self):
        """Move the actor to CUDA when available (CPU fallback)."""
        device = th.device("cuda" if th.cuda.is_available() else "cpu")
        self.to(device)
        return self

    def choose_action(self, s):
        """Sample an action for environment interaction (inference only)."""
        with th.no_grad():  # BUGFIX: avoid building an autograd graph every env step
            inputstate = th.FloatTensor(s).to(self.device)
            mu, std = self.action_net(inputstate)
            dist = th.distributions.Normal(mu, std)
            action = th.tanh(dist.sample())  # squash into [-1, 1]
        return action.cpu().numpy()

    def evaluate(self, s):
        """Reparameterized action sample and its tanh-corrected log-probability
        (used during training so gradients flow through the sample)."""
        mu, std = self.action_net(s)
        dist = th.distributions.Normal(mu, std)
        # reparameterization trick: a = tanh(mu + std * eps), eps ~ N(0, 1)
        noise = th.randn_like(mu)
        pre_tanh = mu + std * noise
        action = th.tanh(pre_tanh)
        # change-of-variables correction for the tanh squashing
        log_prob = dist.log_prob(pre_tanh) - th.log(1 - action.pow(2) + 1e-6)
        log_prob = log_prob.sum(1, keepdim=True)  # sum over action dimensions
        return action, log_prob

    def learn(self, actor_loss):
        """One gradient step on the policy network."""
        self.optimizer.zero_grad()
        actor_loss.backward()
        self.optimizer.step()

# Critic
class SAC_Critic():
    """SAC critic: twin Q-network, its target copy, optimizer, and soft updates."""

    def __init__(self, lr_critic):
        self.critic_v = QNet(state_number, action_number)  # online twin-Q network
        self.target_v = QNet(state_number, action_number)  # target twin-Q network
        self.target_v.load_state_dict(self.critic_v.state_dict())
        self.target_v.eval()  # target net is evaluation-only
        for p in self.target_v.parameters():
            p.requires_grad_(False)  # target net is only written via soft_update, never by backprop
        self.optimizer = th.optim.Adam(self.critic_v.parameters(), lr=lr_critic, eps=1e-5)
        self.lossfunc = nn.MSELoss()  # Q regression loss
        # BUGFIX: no device handling existed in __init__ (self.device was only
        # set by to()); place both networks on the available device up front.
        self.device = 'cuda' if th.cuda.is_available() else 'cpu'
        self.critic_v.to(self.device)
        self.target_v.to(self.device)

    def to(self, device: DeviceLike):
        """Set the critic's device and move both networks onto it."""
        self.device = device
        self.critic_v.to(device)
        self.target_v.to(device)
        return self

    def cuda(self):
        """Move the critic to CUDA when available (CPU fallback)."""
        device = th.device("cuda" if th.cuda.is_available() else "cpu")
        self.to(device)
        return self

    def soft_update(self):
        """Polyak-average the online weights into the target network (rate = tau)."""
        for target_param, param in zip(self.target_v.parameters(), self.critic_v.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)

    def get_v(self, s, a):
        """Online Q estimates (q1, q2) with gradients."""
        return self.critic_v(s, a)

    def get_target_v(self, s, a):
        """Target Q estimates (q1, q2) without gradients."""
        with th.no_grad():
            return self.target_v(s, a)

    def learn(self, current_q1, current_q2, target_q):
        """One gradient step on both Q heads against a shared target."""
        loss = self.lossfunc(current_q1, target_q) + self.lossfunc(current_q2, target_q)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

class Entropy():
    """Learnable SAC temperature (alpha) with automatic entropy tuning."""

    def __init__(self, lr_alpha):
        self.target_entropy = -action_number  # heuristic target: -dim(action space)
        self.lr_alpha = lr_alpha
        # BUGFIX: log_alpha was created on 'cuda' unconditionally, crashing on
        # CPU-only machines; pick the available device instead.
        self.device = 'cuda' if th.cuda.is_available() else 'cpu'
        self.log_alpha = th.zeros(1, requires_grad=True, device=self.device)
        self.alpha = self.log_alpha.exp()  # alpha = exp(log_alpha)
        self.optimizer = th.optim.Adam([self.log_alpha], lr=lr_alpha)

    def to(self, device: DeviceLike):
        """Move alpha to `device`.

        BUGFIX: a plain `log_alpha.to(device)` returns a non-leaf copy when the
        device changes, leaving the optimizer updating a tensor that no longer
        receives gradients. Recreate the leaf and rebuild the optimizer instead.
        """
        self.device = device
        self.log_alpha = self.log_alpha.detach().to(device).requires_grad_(True)
        self.alpha = self.log_alpha.exp()
        self.optimizer = th.optim.Adam([self.log_alpha], lr=self.lr_alpha)
        return self

    def cuda(self):
        """Move alpha to CUDA when available (CPU fallback)."""
        device = th.device("cuda" if th.cuda.is_available() else "cpu")
        self.to(device)
        return self

    def learn(self, entropy_loss):
        """One gradient step on log_alpha, then refresh the cached alpha."""
        self.optimizer.zero_grad()
        entropy_loss.backward()
        self.optimizer.step()
        self.alpha = self.log_alpha.exp()


'''Environment instantiation'''
from path_plan_env import DynamicPathPlanning
# NOTE: the environment is created at import time (module-level side effect).
env = DynamicPathPlanning(dt=0.1)  # dt = simulation step; units per env definition — confirm
obs_space = env.observation_space  # assumed to expose .shape/.dtype (used by Buffer) — TODO confirm
act_space = env.action_space

if __name__ == '__main__':
    # TensorBoard setup (log dir keyed by date+hour: starttime[:13])
    from torch.utils.tensorboard import SummaryWriter
    starttime = time.strftime("%Y-%m-%d_%H:%M:%S")
    log = SummaryWriter(log_dir = "./tb_log/"+starttime[:13], comment=starttime[:13], flush_secs=60)

    # training setup
    device = th.device("cuda" if th.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    memory = Buffer(MemoryCapacity, obs_space, act_space)
    actor = SAC_Actor(lr_actor)
    actor.cuda()
    critic = SAC_Critic(lr_critic)
    critic.cuda()
    entropy = Entropy(lr_alpha)
    entropy.cuda()

    # BUGFIX: env.plot() writes into ./output but nothing created that directory.
    os.makedirs("./output", exist_ok=True)

    for episode in range(MAX_EPISODE):
        ## per-episode reward accumulators
        reward_total = 0
        reward_total_edge = 0
        reward_total_obstacle = 0
        reward_total_goal = 0
        ## initial observation
        obs = env.reset()
        ## fresh OU exploration noise every episode
        ou_noise = Ornstein_Uhlenbeck_Noise(mu=np.zeros(act_space.shape[0]))
        ## initial action
        act = np.zeros(act_space.shape[0])
        ## roll out one episode
        for steps in range(env.max_episode_steps):
            # env.render()  # visualization (disabled)
            # select an action; add OU noise only during the first 50 warm-up episodes
            act = actor.choose_action(obs)
            noise = ou_noise() if episode <= 50 else 0
            act = np.clip(act + noise, env.action_space.low[0], env.action_space.high[0])
            # step the simulation
            next_obs, reward, done, edge_r, obstacle_r, goal_r, info = env.step(act)
            # store the transition
            memory.push((obs, act, reward, next_obs, done))
            # learn once enough transitions have been collected
            if memory._current_size > UpdateAfter:
                batch = memory.sample(batch_size)
                s, a, r, s_, done = batch['s'], batch['a'], batch['r'], batch['s_'], batch['done']

                # per-batch reward normalization
                r = (r - r.mean()) / (r.std() + 1e-8)

                # --- critic update: clipped double-Q soft Bellman target ---
                with th.no_grad():
                    a_, log_prob_ = actor.evaluate(s_)
                    target_q1, target_q2 = critic.get_target_v(s_, a_)
                    q_next = th.min(target_q1, target_q2)
                    target_q = r + (1 - done) * gamma * (q_next - entropy.alpha * log_prob_)

                q1_curr, q2_curr = critic.get_v(s, a)
                critic.learn(q1_curr, q2_curr, target_q.detach())

                # --- actor update: maximize soft Q ---
                action, log_prob = actor.evaluate(s)
                q1, q2 = critic.get_v(s, action)
                q = th.min(q1, q2)
                actor_loss = (entropy.alpha * log_prob - q).mean()
                actor.learn(actor_loss)

                # --- temperature update (entropy.learn already refreshes
                #     entropy.alpha, so the old duplicate refresh was dropped) ---
                alpha_loss = -(entropy.log_alpha.exp() * (log_prob + entropy.target_entropy).detach()).mean()
                entropy.learn(alpha_loss)

                # --- target-network soft update ---
                critic.soft_update()
            # accumulate rewards
            reward_total += reward
            reward_total_edge += edge_r
            reward_total_obstacle += obstacle_r
            reward_total_goal += goal_r
            # episode termination
            if info["terminal"]:
                print(
                    "回合: ", episode,
                    "| 累积奖励: ", round(reward_total, 2),
                    "| 平均奖励: ", round(reward_total / (steps + 1), 2),
                    "| 边界奖励: ", round(reward_total_edge, 2),
                    "| 障碍奖励: ", round(reward_total_obstacle, 2),
                    "| 目标奖励: ", round(reward_total_goal, 2),
                    "| 状态: ", info["state"],
                    "| 步数: ", steps,
                )
                break
            else:
                obs = deepcopy(next_obs)
        # end for
        # BUGFIX: mean_reward was only assigned inside the terminal branch, so
        # logging raised NameError whenever an episode hit the step limit;
        # compute it unconditionally from the last step index.
        mean_reward = reward_total / (steps + 1)
        ## TensorBoard logging
        log.add_scalar('TotalReward', reward_total, episode)
        log.add_scalar('MeanReward', mean_reward, episode)
        log.add_scalar('EdgeReward', reward_total_edge, episode)
        log.add_scalar('ObstacleReward', reward_total_obstacle, episode)
        log.add_scalar('GoalReward', reward_total_goal, episode)

        ## periodic trajectory plot
        if episode % OUTPUT_FREQ == 0:
            env.plot(f"./output/out{episode}.png")

        ## periodic checkpoint of the actor network + optimizer state
        if episode % 20 == 0 and episode > 200:
            os.makedirs("./models", exist_ok=True)
            save_data = {'net': actor.action_net.state_dict(), 'opt': actor.optimizer.state_dict()}
            th.save(save_data, "./models/Path_SAC_actor_L1.pth")
