from rl_games.algos_torch import a2c_continuous
from rl_games.algos_torch import torch_ext
from rl_games.common import common_losses
from rl_games.common import schedulers

import torch


class A2CSimpleAgent(a2c_continuous.A2CAgent):
    """
    Continuous-control A2C variant without PPO clipping/KL tracking.
    Optimized with GAE (Generalized Advantage Estimation) and Advantage Normalization.

    Differences from the base A2CAgent (as visible in this file):
    - PPO is force-disabled (``self.ppo = False``) regardless of config.
    - Optional learning-rate scheduling from config, falling back to an
      identity (constant-LR) scheduler.
    - Several mini-epochs per rollout for better sample efficiency.
    - Advantage normalization plus symmetric clamping, and clamping of
      mu / log-probs / per-term losses to guard against numerical blow-ups.
    - Optional adaptive entropy coefficient driven by a target entropy.
    """

    def __init__(self, base_name, params):
        """Build the agent on top of A2CAgent and force-disable PPO paths.

        Args:
            base_name: run/experiment name forwarded to the base class.
            params: rl_games config dict forwarded to the base class; this
                subclass additionally reads the keys documented inline below
                from ``self.config``.
        """
        super().__init__(base_name, params)
        # Ensure PPO-specific code paths stay disabled even if config says otherwise.
        self.ppo = False
        self.config['ppo'] = False

        # Optimization passes over each rollout (default 3 for sample efficiency).
        self.mini_epochs_num = self.config.get('mini_epochs', 3)

        # LR scheduling: if the config requests a non-constant schedule, keep
        # whatever scheduler the base class already built; otherwise pin the
        # learning rate with an identity scheduler.
        if 'lr_schedule' in self.config and self.config['lr_schedule'] != 'constant':
            pass
        else:
            self.scheduler = schedulers.IdentityScheduler()

        # PPO-style clipped critic loss toggle (on by default); passed to
        # common_losses.critic_loss in calc_gradients.
        self.clip_value = self.config.get('clip_value', True)

        # GAE lambda (named tau here); the 0.97 default favors longer-horizon
        # credit assignment.
        self.tau = self.config.get('tau', 0.97)
        self.normalize_advantage = self.config.get('normalize_advantage', True)
        self.normalize_value = self.config.get('normalize_value', True)

        # Symmetric clamp range applied to (possibly normalized) advantages
        # in calc_gradients; deliberately wide to preserve learning signal.
        self.adv_clip_range = self.config.get('adv_clip_range', 15.0)

        # Optional adaptive entropy bonus (used in calc_gradients when both
        # adaptive_entropy is set and target_entropy is not None).
        self.adaptive_entropy = self.config.get('adaptive_entropy', False)
        self.target_entropy = self.config.get('target_entropy', None)

        # Enable std numerical protection on the policy network, if present.
        # NOTE(review): assumes the network reads 'use_stable_std' — confirm
        # against the a2c_network implementation.
        if hasattr(self, 'model') and hasattr(self.model, 'a2c_network'):
            setattr(self.model.a2c_network, 'use_stable_std', True)

        # Update counter; initialized here, not otherwise used in this chunk.
        self.update_count = 0

    def discount_values(self, fdones, last_extrinsic_values, mb_fdones, mb_extrinsic_values, mb_rewards):
        """Compute GAE(gamma, tau) advantages by iterating the rollout backwards.

        Args:
            fdones: done flags for the final rollout step — assumed shape
                (num_envs,), TODO confirm against the caller.
            last_extrinsic_values: bootstrap value estimates for the state
                after the last step.
            mb_fdones: per-step done flags, indexed by time [t].
            mb_extrinsic_values: per-step value predictions, indexed [t].
            mb_rewards: per-step rewards, indexed [t].

        Returns:
            Tensor of advantages, same shape as ``mb_rewards``.
        """
        mb_advs = torch.zeros_like(mb_rewards)
        lastgaelam = 0

        for t in reversed(range(self.horizon_length)):
            if t == self.horizon_length - 1:
                # Last step: bootstrap from the post-rollout state's value.
                next_non_terminal = 1.0 - fdones
                next_values = last_extrinsic_values
            else:
                next_non_terminal = 1.0 - mb_fdones[t + 1]
                next_values = mb_extrinsic_values[t + 1]

            # Broadcast the per-env done mask against the value column dim.
            next_non_terminal = next_non_terminal.unsqueeze(1)

            # TD residual, then the standard GAE recursion:
            #   A_t = delta_t + gamma * tau * (1 - done_{t+1}) * A_{t+1}
            delta = mb_rewards[t] + self.gamma * next_values * next_non_terminal - mb_extrinsic_values[t]
            mb_advs[t] = lastgaelam = delta + self.gamma * self.tau * next_non_terminal * lastgaelam

        return mb_advs

    def discount_values_masks(self, fdones, last_extrinsic_values, mb_fdones, mb_extrinsic_values, mb_rewards, mb_masks):
        """GAE advantages as in ``discount_values``, with per-step validity masks.

        Both the TD residual and the running GAE accumulator are multiplied by
        ``mb_masks[t]``, so masked-out steps contribute zero advantage and also
        reset the recursion for subsequent (earlier) steps.

        Args:
            fdones: done flags for the final rollout step.
            last_extrinsic_values: bootstrap values for the post-rollout state.
            mb_fdones: per-step done flags, indexed [t].
            mb_extrinsic_values: per-step value predictions, indexed [t].
            mb_rewards: per-step rewards, indexed [t].
            mb_masks: per-step validity masks, indexed [t]; 0 excludes a step.

        Returns:
            Tensor of masked advantages, same shape as ``mb_rewards``.
        """
        mb_advs = torch.zeros_like(mb_rewards)
        lastgaelam = 0

        for t in reversed(range(self.horizon_length)):
            if t == self.horizon_length - 1:
                next_non_terminal = 1.0 - fdones
                next_values = last_extrinsic_values
            else:
                next_non_terminal = 1.0 - mb_fdones[t + 1]
                next_values = mb_extrinsic_values[t + 1]

            next_non_terminal = next_non_terminal.unsqueeze(1)
            masks_t = mb_masks[t].unsqueeze(1)

            # Masked GAE: zero out both the residual and the accumulator at
            # invalid steps.
            delta = (mb_rewards[t] + self.gamma * next_values * next_non_terminal - mb_extrinsic_values[t]) * masks_t
            mb_advs[t] = lastgaelam = (delta + self.gamma * self.tau * next_non_terminal * lastgaelam) * masks_t

        return mb_advs

    def calc_gradients(self, input_dict):
        """Run one optimization step on a mini-batch.

        Computes actor/critic/entropy/bound losses (plus optional prediction
        and "autonomous" auxiliary losses returned by the model), runs the
        backward pass under AMP scaling, steps the optimizer through the base
        class helper, then records per-term results in ``self.train_result``.

        Args:
            input_dict: mini-batch dict with keys 'old_values',
                'old_logp_actions', 'advantages', 'mu', 'sigma', 'returns',
                'actions', 'obs', optionally 'ground_truths', and — when
                ``self.is_rnn`` — 'rnn_masks', 'rnn_states', 'dones'.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        obs_batch = self._preproc_obs(input_dict['obs'])
        gt_batch = input_dict.get("ground_truths", None)

        # Advantage normalization with a stability guard: with fewer than two
        # samples, or a near-zero std, full standardization is ill-conditioned.
        if self.normalize_advantage and len(advantage) > 1:
            adv_mean = advantage.mean()
            adv_std = advantage.std()
            # Only standardize when std is large enough to avoid dividing by
            # a value close to zero.
            if adv_std > 1e-4:
                advantage = (advantage - adv_mean) / (adv_std + 1e-8)
            else:
                # Degenerate variance: center only.
                advantage = advantage - adv_mean

        # Wide symmetric clamp (default +/-15) to drop extreme outliers while
        # preserving most of the learning signal.
        advantage = torch.clamp(advantage, -self.adv_clip_range, self.adv_clip_range)

        lr_mul = 1.0
        curr_e_clip = self.e_clip

        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs': obs_batch,
        }

        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len
            batch_dict['dones'] = input_dict['dones']

        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            mu = res_dict['mus']
            sigma = res_dict['sigmas']
            prediction = res_dict.get("prediction", None)
            autonomous_losses = res_dict.get("autonomous_losses", None)

            # Moderate clamping against numerical blow-ups without overly
            # restricting learning.
            mu = torch.clamp(mu, -3.0, 3.0)
            # Clamp (neg)log-probs to keep extreme values out of the loss.
            action_log_probs = torch.clamp(action_log_probs, -30.0, 10.0)

            # When PPO is disabled, actor_loss ignores the old log-prob argument.
            a_loss = self.actor_loss_func(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)

            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                c_loss = torch.zeros(1, device=self.ppo_device)

            if self.bound_loss_type == 'regularisation':
                b_loss = self.reg_loss(mu)
            elif self.bound_loss_type == 'bound':
                b_loss = self.bound_loss(mu)
            else:
                b_loss = torch.zeros(1, device=self.ppo_device)

            # Optional supervised prediction loss against provided ground truths.
            # NOTE(review): assumes gt_batch is not None whenever the model
            # emits a prediction — confirm with the dataset producer.
            if prediction is not None:
                s_loss = (prediction - gt_batch) ** 2

            # Fixed index layout for apply_masks:
            #   [0]=actor, [1]=critic, [2]=entropy, [3]=bound,
            #   [4]=prediction loss OR zero placeholder, [5:]=autonomous losses.
            loss_terms = [a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)]
            if prediction is not None:
                loss_terms += [s_loss]
            if autonomous_losses is not None:
                if prediction is None:
                    # Placeholder keeps autonomous losses at indices 5+.
                    loss_terms += [torch.tensor(0., device=self.ppo_device)]
                loss_terms += list(autonomous_losses.values())

            losses, _ = torch_ext.apply_masks(loss_terms, rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]
            if prediction is not None:
                s_loss = losses[4]
            if autonomous_losses is not None:
                # Dict order is preserved, so values map back to their keys.
                for (i, k) in enumerate(autonomous_losses):
                    autonomous_losses[k] = losses[5 + i]

            # Clamp per-term losses so a few outlier samples cannot explode
            # the gradient.
            a_loss = torch.clamp(a_loss, -200.0, 200.0)
            c_loss = torch.clamp(c_loss, 0.0, 2000.0)
            b_loss = torch.clamp(b_loss, 0.0, 200.0)

            # Optional adaptive entropy coefficient: raise exploration when
            # entropy drops below target, dampen it when far above.
            entropy_coef = self.entropy_coef
            if self.adaptive_entropy and self.target_entropy is not None:
                current_entropy = entropy.mean().item()
                if current_entropy < self.target_entropy:
                    entropy_coef *= 1.5  # boost exploration
                elif current_entropy > self.target_entropy * 2:
                    entropy_coef *= 0.8  # reduce exploration

            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * entropy_coef + b_loss * self.bounds_loss_coef

            if prediction is not None:
                loss += s_loss
            if autonomous_losses is not None:
                for l in autonomous_losses.values():
                    loss += l

            # Multi-GPU uses optimizer.zero_grad(); otherwise zero grads by
            # detaching them (param.grad = None), which avoids a memset.
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                for param in self.model.parameters():
                    param.grad = None

        self.scaler.scale(loss).backward()
        # Reuse the base A2CAgent update logic (grad norm clipping, LR
        # scheduling, scaler step) rather than duplicating it here.
        # TODO: refactor this ugliest code of the year
        self.trancate_gradients_and_step()

        with torch.no_grad():
            # KL between new and old policy, reported for diagnostics only
            # (PPO is disabled, so it does not gate updates).
            reduce_kl = rnn_masks is None
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if rnn_masks is not None:
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel()

        self.diagnostics.mini_batch(self,
        {
            'values': value_preds_batch,
            'returns': return_batch,
            'new_neglogp': action_log_probs,
            'old_neglogp': old_action_log_probs_batch,
            'masks': rnn_masks
        }, curr_e_clip, 0)

        # Fixed tuple layout consumed by the training loop; optional terms are
        # appended after b_loss (prediction loss placeholder mirrors the
        # loss_terms layout above).
        self.train_result = (
            a_loss,
            c_loss,
            entropy,
            kl_dist,
            self.last_lr,
            lr_mul,
            mu.detach(),
            sigma.detach(),
            b_loss,
        )

        if prediction is not None:
            self.train_result += (s_loss,)
        if autonomous_losses is not None:
            if prediction is None:
                self.train_result += (torch.tensor(0., device=self.ppo_device),)
            self.train_result += (autonomous_losses,)
