import abc

import gtimer as gt
from rlkit.core.rl_algorithm import BaseRLAlgorithm
from rlkit.data_management.replay_buffer import ReplayBuffer
from rlkit.samplers.data_collector import PathCollector
from rlkit.core import logger, eval_util

import numpy as np
import torch


"""
self.offline_rl = self.epoch < 0：负epoch表示离线学习，正epoch表示在线学习
offline_rl决定了是否将收集到的探索数据存入replay buffer
"""

class BatchRLAlgorithm(BaseRLAlgorithm, metaclass=abc.ABCMeta):
    """Batch RL algorithm with offline pretraining, online fine-tuning, and
    optional JSRL-style guided exploration.

    Epoch numbering encodes the training phase: negative epochs are
    *offline* (no exploration data is collected or stored), non-negative
    epochs are *online*. When ``jsrl_kwargs`` provides a guide policy, each
    exploration rollout is started by the guide for ``self.horizon`` steps;
    the horizon shrinks along a curriculum as evaluation returns improve.
    """

    def __init__(
            self,
            trainer,
            exploration_env,
            evaluation_env,
            exploration_data_collector: PathCollector,
            evaluation_data_collector: PathCollector,
            replay_buffer: ReplayBuffer,
            batch_size,
            max_path_length,
            num_epochs,
            eval_freq,
            num_eval_steps_per_epoch,
            num_expl_steps_per_train_loop,
            num_trains_per_train_loop,
            num_train_loops_per_epoch=1,
            min_num_steps_before_training=0,
            start_epoch=0,  # negative epochs are offline, positive epochs are online
            expl_replay_buffer: ReplayBuffer = None,  # for jsrl: separate buffer for online data
            jsrl_kwargs=None,  # for jsrl: dict with guide_policy_path / max_guide_steps / curriculum_n / curriculum_threshold
    ):
        super().__init__(
            trainer,
            exploration_env,
            evaluation_env,
            exploration_data_collector,
            evaluation_data_collector,
            replay_buffer,
            expl_replay_buffer,  # for jsrl
        )
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.num_epochs = num_epochs
        self.eval_freq = eval_freq
        self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
        self.num_trains_per_train_loop = num_trains_per_train_loop
        self.num_train_loops_per_epoch = num_train_loops_per_epoch
        self.num_expl_steps_per_train_loop = num_expl_steps_per_train_loop
        self.min_num_steps_before_training = min_num_steps_before_training
        self._start_epoch = start_epoch

        # --- JSRL setup ---
        self._guide_policy = None
        if jsrl_kwargs is not None:
            self._guide_policy_path = jsrl_kwargs.get('guide_policy_path', None)

            if self._guide_policy_path is not None:
                # Load the pretrained guide policy from a checkpoint.
                # NOTE(review): torch.load unpickles arbitrary code — only
                # load checkpoints from trusted sources.
                self._guide_policy = torch.load(self._guide_policy_path)['evaluation/policy']

                self._max_guide_steps = jsrl_kwargs.get('max_guide_steps', 0)
                self._curriculum_n = jsrl_kwargs.get('curriculum_n', 0)
                self._curriculum_threshold = jsrl_kwargs.get('curriculum_threshold', 0)

                # BUGFIX: the original step expression
                #     -self._max_guide_steps // self._curriculum_n
                # floor-divides a *negative* numerator (unary minus binds
                # tighter than //), e.g. -10 // 3 == -4 instead of the
                # intended -3, so the curriculum skipped stages. It also
                # raised ZeroDivisionError for the default curriculum_n=0
                # and could leave 0 out of the schedule, meaning the guide
                # policy was never fully handed off. We compute a positive
                # step defensively and guarantee the schedule ends at 0.
                step = max(1, self._max_guide_steps // max(1, self._curriculum_n))
                self._horizons = np.arange(self._max_guide_steps, -1, -step)
                if len(self._horizons) == 0 or self._horizons[-1] != 0:
                    self._horizons = np.append(self._horizons, 0)
                self._horizons_step = 0
                logger.log(f"Curriculum horizons: {self._horizons}")

                # Best evaluation return seen so far; the bar (scaled by
                # curriculum_threshold) that must be cleared before the
                # horizon shrinks.
                self._tolerated_reward = -np.inf

    @property
    def horizon(self):
        """Current number of guide-policy steps per rollout (0 if no guide)."""
        if self._guide_policy is not None:
            return self._horizons[self._horizons_step]
        else:
            return 0

    def update_horizon(self) -> None:
        """Advance the curriculum to the next (smaller) horizon.

        Steps through ``self._horizons`` in order and clamps at the last
        entry (horizon 0, i.e. the guide policy is fully retired). An
        alternative "random" strategy (sampling a horizon each epoch) was
        sketched here previously but is not implemented.
        """
        self._horizons_step = min(self._horizons_step + 1, len(self._horizons) - 1)

    def concatenate_batches(self, batch1, batch2):
        """Concatenate two batches of data along the first (batch) axis.

        Both batches must have exactly the same keys. numpy arrays are
        concatenated along axis 0; lists/tuples are joined with ``+``.

        Raises:
            AssertionError: if the two batches have different key sets.
            ValueError: if a value's type cannot be concatenated.
        """
        # Both batches must share the same key set.
        assert batch1.keys() == batch2.keys(), "Batches must have the same keys"

        concatenated_batch = {}
        for key in batch1.keys():
            value1, value2 = batch1[key], batch2[key]
            if isinstance(value1, np.ndarray):
                concatenated_batch[key] = np.concatenate([value1, value2], axis=0)
            elif isinstance(value1, (list, tuple)):
                concatenated_batch[key] = value1 + value2
            else:
                raise ValueError(f"Cannot concatenate values of type {type(value1)}")

        return concatenated_batch

    def train(self):
        """Negative epochs are offline, positive epochs are online"""
        for self.epoch in gt.timed_for(
                range(self._start_epoch, self.num_epochs),
                save_itrs=True,
        ):
            # Offline phase: skip exploration and data storage in _train().
            self.offline_rl = self.epoch < 0
            self._begin_epoch(self.epoch)
            self._train()
            self._end_epoch(self.epoch)

    def _train(self):
        """Run one epoch: optional warm-up collection, periodic evaluation
        (which also drives the JSRL horizon curriculum), then the
        exploration/training loops."""
        # Warm-up: collect an initial batch of exploration steps at the
        # first online epoch only.
        if self.epoch == 0 and self.min_num_steps_before_training > 0:
            init_expl_paths = self.expl_data_collector.collect_new_paths(
                self.max_path_length,
                self.min_num_steps_before_training,
                discard_incomplete_paths=False,
                guide_policy=self._guide_policy,
                guide_steps=self.horizon,
            )
            if not self.offline_rl:
                # Online data goes to the dedicated exploration buffer when
                # one exists (JSRL), otherwise to the main replay buffer.
                if self.expl_replay_buffer is None:
                    self.replay_buffer.add_paths(init_expl_paths)
                else:
                    self.expl_replay_buffer.add_paths(init_expl_paths)
            self.expl_data_collector.end_epoch(-1)

        if self.eval_freq > 0 and self.epoch % self.eval_freq == 0:
            eval_paths = self.eval_data_collector.collect_new_paths(
                self.max_path_length,
                self.num_eval_steps_per_epoch,
                discard_incomplete_paths=True,
            )
            gt.stamp('evaluation sampling')

            # === JSRL: staged guide-step (horizon) adjustment ===
            eval_path_info = eval_util.get_generic_path_information(eval_paths)
            returns_mean = eval_path_info.get('Returns Mean', 0)

            if self._guide_policy is not None:
                if self._tolerated_reward == -np.inf:
                    # First evaluation establishes the baseline reward.
                    self._tolerated_reward = returns_mean
                elif returns_mean >= self._tolerated_reward * self._curriculum_threshold:
                    # Performance cleared the bar: raise the bar and move
                    # the curriculum to a smaller guide horizon.
                    self._tolerated_reward = returns_mean
                    self.update_horizon()
                    logger.log(f"Update horizon to {self.horizon} at epoch {self.epoch}, horizons step {self._horizons_step}, tolerated reward {self._tolerated_reward}")

                # NOTE(review): assumes self.writer (e.g. a tensorboard
                # SummaryWriter) is provided by the base class — confirm.
                self.writer.add_scalar('jsrl/horizon', self.horizon, self.epoch)
                self.writer.add_scalar('jsrl/tolerated_reward', self._tolerated_reward, self.epoch)

        for _ in range(self.num_train_loops_per_epoch):
            # No exploration sampling during offline training.
            if not self.offline_rl:
                new_expl_paths = self.expl_data_collector.collect_new_paths(
                    self.max_path_length,
                    self.num_expl_steps_per_train_loop,
                    discard_incomplete_paths=False,
                    guide_policy=self._guide_policy,
                    guide_steps=self.horizon,
                )
                gt.stamp('exploration sampling', unique=False)

                if self.expl_replay_buffer is None:
                    self.replay_buffer.add_paths(new_expl_paths)
                else:
                    self.expl_replay_buffer.add_paths(new_expl_paths)
            gt.stamp('data storing', unique=False)

            self.training_mode(True)
            for _ in range(self.num_trains_per_train_loop):
                # Buffer mixing strategy: when online with a separate
                # exploration buffer, draw 3/4 of the batch from online
                # data and 1/4 from the (offline) main buffer.
                if not self.offline_rl and self.expl_replay_buffer is not None:
                    offline_batch_size = self.batch_size // 4
                    online_batch_size = self.batch_size - offline_batch_size
                    train_data = self.concatenate_batches(
                        self.expl_replay_buffer.random_batch(online_batch_size),
                        self.replay_buffer.random_batch(offline_batch_size)
                    )
                else:
                    train_data = self.replay_buffer.random_batch(self.batch_size)
                self.trainer.train(train_data)
            gt.stamp('training', unique=False)
            self.training_mode(False)
