import torch
from gher.utils.launch_util import set_seed
import gher.utils.pytorch_util as ptu
from gher.utils.wrappers import NormalizedBoxEnv
from env.usv_env_default import UsvEnvDefault
from gher.trainer import SACTrainer, BatchRLAlgorithm
from gher.relabeler.usv_relabeler import USVRandomRelabeler
from gher.relabeler.relaber_test import RelabelerTest
from gher.network.network import LatentConditionedMlp
from gher.network.policy import LatentConditionedTanhGaussianPolicy, MakeDeterministicLatentPolicy
from gher.replay_buffer.multi_task_replay_buffer import MultiTaskReplayBuffer
from gher.collector.path_collect import TaskConditionedPathCollector
from gym.spaces import Discrete, MultiBinary
from gher.test_env import TestEnv

def sac_experiment(variant):
    """Run a latent/goal-conditioned SAC experiment configured by ``variant``.

    Builds exploration and evaluation environments, twin latent-conditioned
    Q-networks (plus target copies), a tanh-Gaussian policy, relabelers, a
    multi-task replay buffer, and path collectors, then trains end-to-end
    with ``BatchRLAlgorithm``.

    Args:
        variant: dict of experiment settings. Keys read here: 'seed',
            'env_name', 'env_kwargs', 'qf_kwargs', 'policy_kwargs',
            'trainer_kwargs', 'relabeler_kwargs', 'replay_buffer_kwargs'
            (must contain 'latent_dim'), 'path_collector_kwargs',
            and 'algo_kwargs'.

    Raises:
        NotImplementedError: if ``variant['env_name']`` is not one of
            'usv_env_default' or 'test_env'.
    """
    ptu.set_gpu_mode(False, 0)
    # Seed both the project-level RNGs and torch for reproducibility.
    set_seed(int(variant['seed']))
    torch.manual_seed(int(variant['seed']))

    # Build exploration/evaluation environments and select the relabeler class.
    if variant['env_name'] == 'usv_env_default':
        print('训练环境为：usv_env_default')
        expl_env = UsvEnvDefault(**variant['env_kwargs'])
        eval_env = UsvEnvDefault(**variant['env_kwargs'])
        relabeler_cls = USVRandomRelabeler
    elif variant['env_name'] == 'test_env':
        print('测试环境')
        expl_env = NormalizedBoxEnv(TestEnv(**variant['env_kwargs']))
        eval_env = NormalizedBoxEnv(TestEnv(**variant['env_kwargs']))
        relabeler_cls = RelabelerTest
    else:
        raise NotImplementedError

    # Extract dimensions from the environment spaces.
    if isinstance(expl_env.observation_space, (Discrete, MultiBinary)):
        obs_dim = expl_env.observation_space.n
    else:
        obs_dim = expl_env.observation_space.low.size
    action_dim = expl_env.action_space.low.size
    latent_dim = variant['replay_buffer_kwargs']['latent_dim']

    def _make_qf():
        # One latent-conditioned Q-network: Q(s, a | z) -> scalar.
        return LatentConditionedMlp(
            input_size=obs_dim + action_dim,
            latent_size=latent_dim,
            output_size=1,
            **variant['qf_kwargs']
        )

    # Twin Q-networks and their target copies (standard SAC double-Q setup).
    qf1 = _make_qf()
    qf2 = _make_qf()
    target_qf1 = _make_qf()
    target_qf2 = _make_qf()

    # Stochastic policy for exploration; deterministic wrapper for evaluation.
    policy = LatentConditionedTanhGaussianPolicy(
        obs_dim=obs_dim,
        latent_dim=latent_dim,
        action_dim=action_dim,
        **variant['policy_kwargs']
    )
    expl_policy = policy
    eval_policy = MakeDeterministicLatentPolicy(policy)

    # SAC trainer driving the policy/Q-network updates.
    trainer = SACTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['trainer_kwargs']
    )

    # Relabelers share the live Q-networks and the deterministic policy's
    # underlying network (``wrapped_policy``) for relabeling.
    expl_relabeler = relabeler_cls(
        q1=qf1,
        q2=qf2,
        action_fn=eval_policy.wrapped_policy,
        **variant['relabeler_kwargs']
    )
    eval_relabeler = relabeler_cls(
        q1=qf1,
        q2=qf2,
        action_fn=eval_policy.wrapped_policy,
        **variant['relabeler_kwargs'],
        is_eval=True
    )

    # Replay buffer relabels stored transitions via the exploration relabeler.
    replay_buffer = MultiTaskReplayBuffer(
        env=expl_env,
        relabeler=expl_relabeler,
        **variant['replay_buffer_kwargs']
    )
    expl_path_collector = TaskConditionedPathCollector(
        expl_env,
        expl_policy,
        expl_relabeler,
        **variant['path_collector_kwargs']
    )
    eval_path_collector = TaskConditionedPathCollector(
        eval_env,
        eval_policy,
        eval_relabeler,
        # NOTE(review): is_eval=False on the *evaluation* collector is kept
        # from the original (its inline comment suggests it gates plotting),
        # even though the eval relabeler above uses is_eval=True — confirm
        # against TaskConditionedPathCollector's semantics.
        is_eval=False,
        **variant['path_collector_kwargs']
    )

    # Assemble and run the batch RL training loop.
    runner = BatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        **variant['algo_kwargs']
    )
    runner.to(ptu.device)
    runner.train()













