import argparse
import torch
import time
import os, sys, tempfile
import math
import numpy as np
from gym.spaces import Box, Discrete
from pathlib import Path
from torch.autograd import Variable
from utils.make_env import make_env
from utils.tb_log import configure_tb, log_and_print
from utils.buffer import ReplayBuffer
from algorithms.maddpg import MATD3
from algorithms.maddpg_cc import MATD3CC
from tensorboard_logger import log_value
import datetime
import random
import copy
import shutil
from utils.vae_class import VAE
from utils.controller import SlidingWindow, DecayThenFlatSchedule


from utils.env_wrappers import SubprocVecEnv, DummyVecEnv
import h5py

# Optional dependency: multi-agent MuJoCo environments. Particle-env
# training (simple_spread etc.) works without it, so a missing package
# only disables the MAMuJoCo code paths.
try:
    from multiagent_mujoco.mujoco_multi import MujocoMulti
except ImportError:
    # Narrowed from a bare `except:` so real errors (KeyboardInterrupt,
    # failures *inside* the module) are no longer silently swallowed.
    print('MujocoMulti not installed')

def make_parallel_env(env_id, n_rollout_threads, seed, discrete_action):
    """Build a (possibly vectorized) particle environment.

    Each worker is seeded with ``seed + rank * 1000`` (both the env and
    NumPy's global RNG in that process). Returns a SubprocVecEnv with one
    subprocess per rollout thread, or a DummyVecEnv when only one thread
    is requested.
    """
    def _thunk_for(rank):
        def _init():
            created = make_env(env_id, discrete_action=discrete_action)
            created.seed(seed + rank * 1000)
            np.random.seed(seed + rank * 1000)
            return created
        return _init

    if n_rollout_threads != 1:
        return SubprocVecEnv([_thunk_for(i) for i in range(n_rollout_threads)])
    return DummyVecEnv([_thunk_for(0)])

def eval_policy(agent, env_name, seed, eval_episodes, discrete_action, env_args=None):
    """Evaluate *agent* greedily (explore=False) and return a scalar score.

    For MAMuJoCo (``HalfCheetah-v2``) this is the mean full-episode return
    over ``eval_episodes``. For particle envs it is the per-step reward of
    agent 0 (predator envs) or the mean reward across agents, accumulated
    over fixed-length episodes and divided by ``eval_episodes``.

    NOTE(review): the particle branch reads the module-level ``config``
    (``episode_length``, ``n_rollout_threads``) — confirm it is set before
    calling this outside of ``train``.
    """
    if env_name in ['HalfCheetah-v2']:
        env = MujocoMulti(env_args=env_args)
        env.seed(seed + 100)

        returns = []
        for _ in range(eval_episodes):
            agent.prep_rollouts(device='cpu')

            env.reset()
            done = False
            ep_ret = 0.
            while not done:
                obs = env.get_obs()
                torch_obs = [
                    Variable(torch.Tensor(obs[i]).unsqueeze(0), requires_grad=False)
                    for i in range(agent.nagents)
                ]
                # Greedy (no exploration noise) actions, one per agent.
                acts = [a.data.numpy().squeeze(0) for a in agent.step(torch_obs, explore=False)]

                reward, done, info = env.step(acts)
                ep_ret += reward
            returns.append(ep_ret)

        return np.mean(np.array(returns))

    # Particle-env branch (simple_spread / simple_tag / simple_world, ...).
    total = 0.
    env = make_parallel_env(env_name, 1, seed + 100, discrete_action)
    predator_env = env_name in ['simple_tag', 'simple_world']

    for _ in range(0, eval_episodes):
        obs = env.reset()
        agent.prep_rollouts(device='cpu')

        for _ in range(config.episode_length):
            # Central-critic agents in predator envs also feed prey obs.
            n_obs = agent.nagents
            if predator_env and "CC" in agent.__class__.__name__:
                n_obs += agent.num_preys
            torch_obs = [
                Variable(torch.Tensor(np.vstack(obs[:, i])), requires_grad=False)
                for i in range(n_obs)
            ]
            agent_actions = [ac.data.numpy() for ac in agent.step(torch_obs, explore=False)]
            # Re-group from per-agent arrays to per-thread action lists.
            actions = [[ac[i] for ac in agent_actions] for i in range(config.n_rollout_threads)]
            next_obs, rewards, dones, infos = env.step(actions)

            if predator_env:
                total += rewards[0][0]
            else:
                total += np.mean(rewards[0])

            obs = next_obs

    return total / eval_episodes


def train(config):
    # 在训练开始前记录开始时间
    start_time = time.time()

    unique_token = "{}__{}__{}".format(config.data_type, datetime.datetime.now().strftime("%y%m%d_%H%M%S"), config.seed)
    unique_token += "_{}".format(config.dataset_num)
    if config.use_utd:
        unique_token=f"utd{config.utd_step}_{config.utd_step_planner}_{int(config.utd_step_time/1000)}k_{unique_token}"
    if config.use_o2o:
        unique_token="o2o_"+unique_token
    if config.use_warmup:
        if config.use_warmup_sample:
            unique_token="warmup_sample_"+unique_token
        else:
            unique_token="warmup_"+unique_token
    if config.use_offline_data:
        unique_token="offdata_"+unique_token
    if config.use_guide_agent:
        unique_token=f"guide_st{config.guide_max_steps}_n{config.guide_curriculum_n}_th{config.guide_curriculum_threshold}_w{config.guide_return_window}_{unique_token}"
    if config.omar:
        unique_token="omar_"+unique_token
    if config.cf_cql:
        unique_token="cfcql_"+unique_token
    elif config.cql:
        unique_token="cql_"+unique_token
    if config.central_critic:
        unique_token="CC_"+unique_token
    date_str = datetime.datetime.now().strftime("%y%m%d_%H%M%S")[2:6]
    if config.remark:
        date_str = date_str + "_" + config.remark
    if not config.no_log:
        outdir = os.path.join(config.dir, date_str, config.env_id, unique_token)
        os.makedirs(outdir)
        # outdir = prepare_output_dir(config.dir + '/' + config.env_id, argv=sys.argv)
        print('\033[1;32mOutput files are saved in {} \033[1;0m'.format(outdir))
    savedir = os.path.join(config.dir, date_str, 'model', config.env_id, unique_token)
    os.makedirs(savedir)
    print("Model will be saved in", savedir)
    
    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    torch.cuda.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)
    random.seed(config.seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True

    torch.set_num_threads(config.n_training_threads)

    if config.env_id in ['simple_spread', 'simple_tag', 'simple_world']:
        if config.env_id == 'simple_spread':
            config.lr=0.01
        elif config.env_id == 'simple_world':
            config.steps_per_update=20

        env = make_parallel_env(config.env_id, config.n_rollout_threads, config.seed, config.discrete_action)
        env_args, env_info = None, None
    else:
        env_args = {"scenario": config.env_id, "episode_limit": 1000, "agent_conf": '2x3', "agent_obsk": 0,}
        env = MujocoMulti(env_args=env_args)
        env.seed(config.seed)

        env_info = env.get_env_info()

        config.batch_size = 256
        config.hidden_dim = 256
        if not config.set_lr:
            config.lr = 0.0003
        config.tau = 0.005
        config.gamma = 0.99

        config.omar_iters = 2
        if not config.set_omar_num_samples:
            config.omar_num_samples = 50
        config.omar_num_elites = 5 
    if not config.no_log:
        configure_tb(outdir)
    kwargs={}
    if config.set_alpha:
        kwargs.update({"cql_alpha":config.cql_alpha})
    if config.set_omar_coe:
        kwargs.update({"omar_coe":config.omar_coe})
    if config.central_critic:
        ma_agent = MATD3CC.init_from_env(
            env, config.env_id, config.data_type,
            tau=config.tau, lr=config.lr, hidden_dim=config.hidden_dim,
            cql=config.cql, lse_temp=config.lse_temp, batch_size=config.batch_size, num_sampled_actions=config.num_sampled_actions,
            omar=config.omar, omar_iters=config.omar_iters, omar_mu=config.omar_mu, omar_sigma=config.omar_sigma, omar_num_samples=config.omar_num_samples, omar_num_elites=config.omar_num_elites, 
            env_info=env_info, logging_interval=config.logging_interval, no_log=config.no_log,cf_cql=config.cf_cql, soft_q = not(config.no_soft_q),
            action_noise_scale=config.action_noise_scale, cf_omar=not(config.no_cf_omar), cf_tau=config.cf_tau, no_action_reg=config.no_action_reg, 
            bc_tau=config.bc_tau, sample_action_class=config.sample_action_class, beta_action_noise=config.beta_action_noise,
            cf_pol=not(config.no_cf_pol), cf_target=config.cf_target, **kwargs
        )
    else:
        ma_agent = MATD3.init_from_env(
            env, config.env_id, config.data_type,
            tau=config.tau, lr=config.lr, hidden_dim=config.hidden_dim,
            cql=config.cql, lse_temp=config.lse_temp, batch_size=config.batch_size, num_sampled_actions=config.num_sampled_actions,
            omar=config.omar, omar_iters=config.omar_iters, omar_mu=config.omar_mu, omar_sigma=config.omar_sigma, omar_num_samples=config.omar_num_samples, omar_num_elites=config.omar_num_elites, 
            env_info=env_info, logging_interval=config.logging_interval, no_log=config.no_log,cf_cql=config.cf_cql, soft_q = not(config.no_soft_q), **kwargs
        )

    if config.env_id in ['simple_tag', 'simple_world']:
        pretrained_model_dir = './datasets/{}/pretrained_adv_model.pt'.format(config.env_id)
        ma_agent.load_pretrained_preys(pretrained_model_dir)

    if config.env_id in ['simple_spread', 'simple_tag', 'simple_world']:
        replay_buffer = ReplayBuffer(
            config.buffer_length, ma_agent.nagents,
            [obsp.shape[0] for obsp in env.observation_space],
            [acsp.shape[0] if isinstance(acsp, Box) else acsp.n for acsp in env.action_space],
        )
    else:
        replay_buffer = ReplayBuffer(
            config.buffer_length, ma_agent.nagents,
            [env_info['obs_shape'] for _ in env.observation_space],
            [acsp.shape[0] for acsp in env.action_space],
            is_mamujoco=True,
            state_dims=[env_info['state_shape'] for _ in env.observation_space],
        )

    if config.use_guide_agent:
        guide_agent = MATD3CC.init_from_env(
            env, config.env_id, config.data_type,
            tau=config.tau, lr=config.lr, hidden_dim=config.hidden_dim,
            cql=config.cql, lse_temp=config.lse_temp, batch_size=config.batch_size, num_sampled_actions=config.num_sampled_actions,
            omar=config.omar, omar_iters=config.omar_iters, omar_mu=config.omar_mu, omar_sigma=config.omar_sigma, omar_num_samples=config.omar_num_samples, omar_num_elites=config.omar_num_elites, 
            env_info=env_info, logging_interval=config.logging_interval, no_log=config.no_log,cf_cql=config.cf_cql, soft_q = not(config.no_soft_q),
            action_noise_scale=config.action_noise_scale, cf_omar=not(config.no_cf_omar), cf_tau=config.cf_tau, no_action_reg=config.no_action_reg, 
            bc_tau=config.bc_tau, sample_action_class=config.sample_action_class, beta_action_noise=config.beta_action_noise,
            cf_pol=not(config.no_cf_pol), cf_target=config.cf_target, **kwargs
        )
        guide_agent.load(os.path.join(f"./offline_model/HalfCheetah-v2/{config.data_type}", 'checkpoint.pth'))
        guide_agent.prep_rollouts(device='cpu')

        guide_steps_cnt = 0  # 引导策略执行步数
        guide_tolerated_reward = -np.inf  # 可以容忍的奖励
        guide_return_cnt = 0  # 混合策略评估计数
        guide_return_buf = SlidingWindow(maxlen=config.guide_return_window)  # 用于存储最近的奖励

        guide_horizons=np.arange(config.guide_max_steps, -1, -config.guide_max_steps // config.guide_curriculum_n,)
        guide_horizons_step = 0

    if config.use_o2o:
        if config.use_online_buffer:
            # 在线 buffer
            online_buffer = ReplayBuffer(
                config.buffer_length, ma_agent.nagents,
                [env_info['obs_shape'] for _ in env.observation_space],
                [acsp.shape[0] for acsp in env.action_space],
                is_mamujoco=True,
                state_dims=[env_info['state_shape'] for _ in env.observation_space],
            )
        if config.use_offline_data:
            replay_buffer.load_batch_data(config.dataset_dir, rew_scale = config.rew_scale)
            if np.isinf(replay_buffer.ave_reward):
                replay_buffer.ave_reward = replay_buffer.sum_reward / (replay_buffer.filled_i/config.episode_length)
            print('offline_data average_reward:', replay_buffer.ave_reward)
        # 在线到离线
        if config.use_warmup:
            # 加载离线模型
            ma_agent.load(os.path.join(config.offline_model_dir, 'checkpoint.pth'))

            # 热身采样
            if config.use_warmup_sample:
                for t in range(config.warmup_sample_step):
                    _buffer = online_buffer if config.use_online_buffer else replay_buffer

                    ma_agent.prep_rollouts(device='cpu')
                    if t == 0 or done:
                        env.reset()
                        obs = env.get_obs()
                        state = env.get_state()
                    else:
                        obs = next_obs
                        state = next_state
                    torch_obs = [Variable(torch.Tensor(obs[i]).unsqueeze(0), requires_grad=False) for i in range(ma_agent.nagents)] 
                    torch_agent_actions = ma_agent.step(torch_obs, explore=True)
                    agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
                    actions = [ac.squeeze(0) for ac in agent_actions]
                    
                    reward, done, info = env.step(actions)
                    next_obs = env.get_obs()
                    next_state = env.get_state()

                    # Store data in replay buffer
                    for agent_i in range(ma_agent.nagents):
                        _buffer.obs_buffs[agent_i][_buffer.curr_i] = obs[agent_i]
                        _buffer.ac_buffs[agent_i][_buffer.curr_i] = actions[agent_i]
                        _buffer.rew_buffs[agent_i][_buffer.curr_i] = reward
                        _buffer.next_obs_buffs[agent_i][_buffer.curr_i] = next_obs[agent_i]
                        _buffer.done_buffs[agent_i][_buffer.curr_i] = done
                        if _buffer.is_mamujoco:
                            _buffer.state_buffs[agent_i][_buffer.curr_i] = state[agent_i]
                            _buffer.next_state_buffs[agent_i][_buffer.curr_i] = next_state[agent_i]

                    _buffer.filled_i = min(_buffer.filled_i+1, _buffer.max_steps)
                    _buffer.curr_i = (_buffer.curr_i + 1) % _buffer.max_steps
    else:
        if not config.only_online:
            # 离线训练，仅离线数据
            replay_buffer.load_batch_data(config.dataset_dir, rew_scale = config.rew_scale)
            if np.isinf(replay_buffer.ave_reward):
                replay_buffer.ave_reward = replay_buffer.sum_reward / (replay_buffer.filled_i/config.episode_length)
            print('Average_reward:', replay_buffer.ave_reward)

    #load vae weight
    if config.cf_weight:
        ma_agent.cf_weight=True
        sample = replay_buffer.sample(config.batch_size, to_gpu=True)
        if config.env_id in ['HalfCheetah-v2']:
            states, obs, acs, rews, next_states, next_obs, dones, next_acs = sample
            train_states = states[0]
        else:
            obs, acs, rews, next_obs, dones, next_acs = sample
            train_states = torch.cat([ob.unsqueeze(1) for ob in obs], dim=1).reshape(obs[0].shape[0], obs[0].shape[1]*len(obs)) #bs, ds  
        train_actions = torch.cat(acs, dim=1)
        state_dim = train_states.shape[1]
        action_dim = train_actions.shape[1]
        max_action = env.action_space[0].high[0]
        device='cuda'
        vae = VAE(state_dim, action_dim, action_dim*2, max_action, hidden_dim=config.vae_hidden_dim).to(device)
        load_path = os.path.join(config.vae_model_dir, config.env_id)
        all_dirs = os.listdir(load_path)
        dd = ""
        for d in all_dirs:
            if d.startswith(config.data_type+"_") and d.endswith(str(config.dataset_num)):
                dd=d
                break
        if not len(dd):
            print("No weight of vae finded!")
            exit(1)
        model_path = os.path.join(config.vae_model_dir, config.env_id, dd, "model.pt")
        print("Load from", model_path)
        vae.load_state_dict(torch.load(model_path))
        vae.eval()
        ma_agent.vae = vae

    utd_step_schedule = DecayThenFlatSchedule(config.utd_step, config.utd_step_finish, config.utd_step_time, decay=config.utd_step_planner) if config.use_utd else None

    # === 训练循环 ===
    if config.use_o2o or config.only_online:
        done = True
        ep_r = 0
        ep_r_buf = SlidingWindow(maxlen=10)
        step_cnt = 0

    for t in range(config.num_steps + 1):

        if config.use_o2o or config.only_online:
            # === 在线采样一步并存到buffer ===
            _buffer = online_buffer if config.use_online_buffer else replay_buffer

            ma_agent.prep_rollouts(device='cpu')
            if t == 0 or done:
                env.reset()
                obs = env.get_obs()
                state = env.get_state()
                step_cnt = 0
                guide_steps_cnt = 0
                ep_r = 0
            else:
                obs = next_obs
                state = next_state
                step_cnt += 1
            torch_obs = [Variable(torch.Tensor(obs[i]).unsqueeze(0), requires_grad=False) for i in range(ma_agent.nagents)] 

            guide_step_flag = False
            if config.use_guide_agent:
                if guide_steps_cnt <= guide_horizons[guide_horizons_step]:
                    guide_step_flag = True
            if guide_step_flag:
                torch_agent_actions = guide_agent.step(torch_obs, explore=True)
                guide_steps_cnt += 1
            else:
                torch_agent_actions = ma_agent.step(torch_obs, explore=True)

            agent_actions = [ac.data.numpy() for ac in torch_agent_actions]
            actions = [ac.squeeze(0) for ac in agent_actions]
            
            reward, done, info = env.step(actions)
            next_obs = env.get_obs()
            next_state = env.get_state()

            # Store data in replay buffer
            for agent_i in range(ma_agent.nagents):
                _buffer.obs_buffs[agent_i][_buffer.curr_i] = obs[agent_i]
                _buffer.ac_buffs[agent_i][_buffer.curr_i] = actions[agent_i]
                _buffer.rew_buffs[agent_i][_buffer.curr_i] = reward
                _buffer.next_obs_buffs[agent_i][_buffer.curr_i] = next_obs[agent_i]
                _buffer.done_buffs[agent_i][_buffer.curr_i] = done
                if _buffer.is_mamujoco:
                    _buffer.state_buffs[agent_i][_buffer.curr_i] = state[agent_i]
                    _buffer.next_state_buffs[agent_i][_buffer.curr_i] = next_state[agent_i]

            _buffer.filled_i = min(_buffer.filled_i+1, _buffer.max_steps)
            _buffer.curr_i = (_buffer.curr_i + 1) % _buffer.max_steps

            # 记录奖励
            ep_r += reward
            if done:
                ep_r_buf.append(ep_r)

                # 更新引导策略课程
                if config.use_guide_agent:
                    guide_return_buf.append(ep_r)
                    if guide_return_buf.full:
                        avg_reward = guide_return_buf.mean()
                        # 联合策略（引导 + 探索）的性能达到阈值时，进入下一阶段
                        if guide_tolerated_reward == -np.inf:
                            guide_return_buf.reset()  # 重置奖励缓冲区
                            guide_tolerated_reward = avg_reward  # 初始化容忍奖励
                        elif avg_reward > guide_tolerated_reward * config.guide_curriculum_threshold:  # 严格大于，避免出现0的情况
                            guide_return_buf.reset()  # 重置奖励缓冲区
                            if guide_horizons[guide_horizons_step] > 0:
                                # 更新课程学习阶段
                                guide_horizons_step += 1
                                guide_horizons_step = min(guide_horizons_step, len(guide_horizons) - 1)  # 防止越界
                                print(f"Update horizon to {guide_horizons[guide_horizons_step]} at t_env {t}, horizons step {guide_horizons_step}, tolerated reward {guide_tolerated_reward}")
                                # if guide_horizons[guide_horizons_step] <= 0:
                                #     curriculum_finish_reset_buffer_flag = True  # 标记课程学习结束后重置缓冲区
                            # 更新容忍奖励
                            if avg_reward > guide_tolerated_reward:
                                guide_tolerated_reward = avg_reward

            if config.only_online:
                if len(replay_buffer) < config.batch_size:
                    continue

        if t % config.logging_interval == 0 and not config.no_log and t > 0:
            current_time = time.time()
            elapsed_time = current_time - start_time
            steps_per_second = t / elapsed_time
            estimated_total_time = config.num_steps / steps_per_second
            remaining_time = estimated_total_time - elapsed_time
            print("")
            print(f"Step {t}/{config.num_steps} | "
                  f"Elapsed: {datetime.timedelta(seconds=int(elapsed_time))} | "
                  f"Remaining: {datetime.timedelta(seconds=int(remaining_time))} | "
                  f"Speed: {steps_per_second:.2f} steps/s")
            log_and_print('buf_len', len(replay_buffer), t)
            if config.use_o2o and config.use_online_buffer:
                log_and_print('expl_return', ep_r_buf.mean(), t)
                log_and_print('on_buf_len', len(online_buffer), t)

        if t % config.eval_interval == 0 or t == config.num_steps:
            eval_return = eval_policy(ma_agent, config.env_id, config.seed, config.eval_episodes, config.discrete_action, env_args=env_args)
            if not config.no_log:
                log_and_print('eval_return', eval_return, t, end="\n", start="\n*** ")
                # log_and_print('normed_eval_return', eval_return/replay_buffer.ave_reward, t)
                
        if (t % config.steps_per_update) < config.n_rollout_threads:
            ma_agent.prep_training(device='gpu') if config.use_gpu else ma_agent.prep_training(device='cpu')

            # for u_i in range(config.n_rollout_threads):
            utd_step = math.ceil(utd_step_schedule.eval(t)) if config.use_utd else 1
            for u_i in range(utd_step):
                if config.central_critic:
                    # 采样
                    if config.use_o2o:
                        if config.use_online_buffer:
                            # === 75% offline + 25% online 混合采样 ===
                            offline_batch = int(config.batch_size * 0.75)
                            online_batch  = config.batch_size - offline_batch
                            sample_off = replay_buffer.sample(offline_batch, to_gpu=config.use_gpu)
                            if len(online_buffer) >= online_batch:
                                sample_on = online_buffer.sample(online_batch, to_gpu=config.use_gpu)
                                sample = tuple(
                                    [torch.cat((s1[_i], s2[_i]), dim=0) for _i in range(ma_agent.nagents)]          # 拼接第 0 维
                                    for s1, s2 in zip(sample_off, sample_on)
                                )
                            else:
                                sample = sample_off
                        else:
                            sample = replay_buffer.sample(config.batch_size, to_gpu=config.use_gpu)
                    else:
                        sample = replay_buffer.sample(config.batch_size, to_gpu=config.use_gpu)

                    ma_agent.update(sample, t)
                    for j in range(config.ca_ratio-1):
                        sample = replay_buffer.sample(config.batch_size, to_gpu=config.use_gpu)
                        ma_agent.update(sample, t+j+1, only_critic=True)
                    
                else:
                    nagents = ma_agent.nagents if config.env_id in ['simple_spread', 'HalfCheetah-v2'] else ma_agent.num_predators

                    for a_i in range(nagents):
                        sample = replay_buffer.sample(config.batch_size, to_gpu=config.use_gpu)

                        ma_agent.update(sample, a_i, t)

                ma_agent.update_all_targets()

    # 训练循环结束后保存最终模型
    ma_agent.save(os.path.join(savedir, 'checkpoint.pth'))

    # 训练结束后记录总时间
    total_time = time.time() - start_time
    if not config.no_log:
        log_and_print('time/total_time', total_time, config.num_steps)
        print(f"Training completed in {datetime.timedelta(seconds=int(total_time))}")

    try:
        env.close()
    except:
        pass

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # --- output / bookkeeping ---
    parser.add_argument("--dir", help="Name of directory to store model/training contents", type=str, default='results')

    # --- environment and run identity ---
    parser.add_argument("--env_id", help="Name of environment", type=str, default='simple_spread')
    parser.add_argument("--seed", default=0, type=int, help="Random seed")
    parser.add_argument("--dataset_num", default=1, type=int, help="Dataset number")
    parser.add_argument("--n_rollout_threads", default=1, type=int)
    parser.add_argument("--n_training_threads", default=1, type=int)
    parser.add_argument("--discrete_action", action='store_true', default=False)
    parser.add_argument("--use_gpu", default=1, type=int)

    # --- core training hyperparameters ---
    parser.add_argument("--buffer_length", default=int(1e6), type=int)
    parser.add_argument("--episode_length", default=25, type=int)
    parser.add_argument("--steps_per_update", default=100, type=int)
    parser.add_argument("--batch_size", default=1024, type=int, help="Batch size for model training")
    parser.add_argument("--hidden_dim", default=64, type=int)
    parser.add_argument("--set_lr", action='store_true')
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--tau", default=0.01, type=float)
    parser.add_argument('--num_updates', default=1, type=int)
    parser.add_argument("--gamma", default=0.95, type=float)
    parser.add_argument("--rew_scale", default=1.0, type=float)
    

    parser.add_argument('--gaussian_noise_std', default=0.1, type=float)

    # --- offline dataset selection ---
    parser.add_argument("--data_type", default='medium', type=str)
    parser.add_argument('--dataset_dir', default='./datasets', type=str)

    # --- evaluation schedule ---
    parser.add_argument('--eval_episodes', default=10, type=int)
    parser.add_argument('--eval_interval', default=1000, type=int)
    parser.add_argument('--num_steps', default=int(1e5), type=int)

    # --- CQL options ---
    parser.add_argument("--cql", action='store_true')
    parser.add_argument("--set_alpha", action='store_true')
    parser.add_argument('--cql_alpha', default=1.0, type=float)
    parser.add_argument("--lse_temp", default=1.0, type=float)
    parser.add_argument('--num_sampled_actions', default=10, type=int)
    
    parser.add_argument('--sample_action_class', default="1-1-1", type=str, help="random,cur_a,next_a;1 use cf;0 not use")
    parser.add_argument('--beta_action_noise', default=0.00001, type=float)
    
     
    parser.add_argument('--cql_sample_noise_level', default=0.2, type=float)

    # --- OMAR options ---
    parser.add_argument("--omar", action='store_true')
    parser.add_argument('--omar_coe', default=1.0, type=float) 
    parser.add_argument('--set_omar_coe', action='store_true')
    parser.add_argument('--omar_iters', default=3, type=int)
    parser.add_argument('--omar_mu', default=0., type=float)
    parser.add_argument('--omar_sigma', default=2.0, type=float)
    parser.add_argument("--set_omar_num_samples", action='store_true')
    parser.add_argument('--omar_num_samples', default=10, type=int)
    parser.add_argument('--omar_num_elites', default=10, type=int)

    # --- algorithm variants / logging ---
    parser.add_argument("--logging_interval", default=1000, type=int)
    parser.add_argument("--central_critic", action='store_true')
    parser.add_argument("--no_log", action='store_true')
    parser.add_argument("--cf_cql", action='store_true')
    parser.add_argument("--no_soft_q", action='store_true')
    parser.add_argument("--ca_ratio", default=5, type=int)
    parser.add_argument("--action_noise_scale", default=0.05, type=float)
    parser.add_argument("--no_cf_omar", action='store_true')
    parser.add_argument("--no_cf_pol", action='store_true')
    parser.add_argument("--cf_target", action='store_true')
    
    
    # --- counterfactual weighting / VAE ---
    parser.add_argument("--cf_weight", action='store_true')
    parser.add_argument("--no_action_reg", action='store_true')
    parser.add_argument("--cf_tau",default=1.0, type=float)
    parser.add_argument("--bc_tau",default=0.0, type=float)
    parser.add_argument("--vae_model_dir", default="./results/vae", type=str)
    parser.add_argument("--vae_hidden_dim", default=750, type=int)

    parser.add_argument("--remark", default="", type=str)
    
    # --- offline-to-online options ---
    parser.add_argument("--use_o2o", action='store_true')
    parser.add_argument("--use_warmup", action='store_true')
    parser.add_argument("--use_warmup_sample", action='store_true')
    parser.add_argument("--warmup_sample_step", default=5000, type=int)
    parser.add_argument("--offline_model_dir", default="./offline_model/HalfCheetah-v2/medium", type=str)
    parser.add_argument("--use_offline_data", action='store_true')
    parser.add_argument("--use_online_buffer", action='store_true')

    # --- update-to-data ratio schedule ---
    parser.add_argument("--use_utd", action='store_true')
    parser.add_argument("--utd_step", default=4, type=float)
    parser.add_argument("--utd_step_finish", default=0.1, type=float)
    parser.add_argument("--utd_step_planner", default="fixed", type=str)
    parser.add_argument("--utd_step_time", default=100000, type=float)

    # --- guide-policy curriculum ---
    parser.add_argument("--use_guide_agent", action='store_true')
    parser.add_argument("--guide_return_window", default=10, type=int)
    parser.add_argument("--guide_max_steps", default=1000, type=int)
    parser.add_argument("--guide_curriculum_n", default=10, type=int)
    parser.add_argument("--guide_curriculum_threshold", default=0.95, type=float)

    parser.add_argument("--only_online", action='store_true')
    
    config = parser.parse_args()

    # Per-environment overrides applied after parsing.
    if config.env_id in ['simple_spread', 'simple_tag']:
        config.num_steps = 200000
    elif config.env_id == 'HalfCheetah-v2':
        config.num_steps = int(1e6)
        config.steps_per_update = 10
        config.eval_interval = 10000
        config.episode_length=1000
        config.gamma=0.99
        
    # Resolve the concrete dataset directory from env / data_type / seed.
    config.dataset_dir = config.dataset_dir + '/' + config.env_id + '/' + config.data_type + '/' + 'seed_{}_data'.format(config.dataset_num)
        
    train(config)