import gym
import numpy as np
import torch
import wandb

import argparse
import pickle
import random
import sys

import torch.nn.functional as F


def discount_cumsum(x, gamma):
    """Return the reverse discounted cumulative sum of *x*.

    out[t] = x[t] + gamma * out[t + 1], computed right-to-left, with
    out[-1] = x[-1]. The result has the same shape and dtype as *x*.
    """
    n = x.shape[0]
    out = np.zeros_like(x)
    out[n - 1] = x[n - 1]
    # Walk backwards, reusing the already-stored tail value at each step.
    for t in range(n - 2, -1, -1):
        out[t] = x[t] + gamma * out[t + 1]
    return out


def experiment(variant):
    """Train and evaluate a Decision Transformer ('dt') or MLP behavior-cloning
    model ('bc') on an offline trajectory dataset.

    Args:
        variant: dict of hyperparameters and flags (built from argparse in
            ``__main__``): env, dataset, model_type, K, batch_size, seed,
            learning_rate, etc.

    Side effects: builds the environment, loads the dataset from ./data,
    trains for ``max_iters`` iterations, and (when ``log_to_wandb`` is set)
    writes TensorBoard logs under ``runs/``.
    """
    seed = variant['seed']

    # NOTE(review): `if seed:` skips seeding when seed == 0; use
    # `if seed is not None:` if a zero seed should also be honored.
    if seed:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.backends.cudnn.deterministic = True

    # Project-local imports are deferred so the RNG seeding above runs first.
    from decision_transformer.evaluation.evaluate_episodes import evaluate_episode, evaluate_episode_rtg
    from decision_transformer.models.decision_transformer import DecisionTransformer
    from decision_transformer.models.mlp_bc import MLPBCModel
    from decision_transformer.training.act_trainer import ActTrainer
    from decision_transformer.training.seq_trainer import SequenceTrainer

    device = variant.get('device', 'cuda')
    log_to_wandb = variant.get('log_to_wandb', False)
    exp_prefix = variant.get('exp_prefix', 'gym-experiment')

    env_name, dataset = variant['env'], variant['dataset']
    model_type = variant['model_type']
    group_name = f'{exp_prefix}-{env_name}-{dataset}'
    # Random suffix keeps repeated runs from colliding in the log directory.
    exp_prefix = f'{group_name}-{random.randint(int(1e5), int(1e6) - 1)}'

    is_discrete_action = False

    if env_name == 'CartPole-v0':
        from tools.my_wrapper_carpole import MyWrapperCartPole

        env = MyWrapperCartPole(seed=seed)

        max_ep_len = 300
        env_targets = [100, 300]  # evaluation conditioning targets
        # If None, the return scale is derived below from the dataset's
        # mean return (see `scale = scale if scale else avg_returns`).
        scale = None
        is_discrete_action = env.is_discrete_action
    elif env_name == 'genshin-v0':
        # TODO(review): this branch only prints; `env`, `max_ep_len`,
        # `env_targets` and `scale` remain undefined, so execution fails
        # with a NameError below until the environment is wired in.
        print('on genshin-v0')

    else:
        raise NotImplementedError

    if model_type == 'bc':
        env_targets = env_targets[:1]  # since BC ignores target, no need for different evaluations

    state_dim = env.observation_space.shape[0]
    if env.action_space.shape:
        act_dim = env.action_space.shape[0]
    else:
        # Discrete action space: actions are stored as a single scalar index.
        act_dim = 1

    # ---- load dataset -----------------------------------------------------
    if env_name in ['Pendulum-v1']:
        dataset_path = f"./data/{env_name}.txt"

        def conv_txt_to_pkl(dataset_path):
            """Parse a flat text log into a list of trajectory dicts.

            Each row is assumed to be laid out as
            [state(3), action(1), reward(1), next_state(3), terminated(1)];
            rows are accumulated until a terminal flag closes a trajectory.
            """
            data = np.loadtxt(dataset_path)
            trajectories = []
            path = {
                'observations': [],
                'actions': [],
                'rewards': [],
                'next_observations': [],
                'terminals': [],
            }

            for d in data:
                state, action, reward, next_state, terminated = list(d[:3]), d[3], d[4], list(d[5:8]), d[8]

                path['observations'].append(state)
                path['actions'].append(action)
                path['rewards'].append(reward)
                path['next_observations'].append(next_state)
                path['terminals'].append(terminated)

                if terminated:
                    path = {k: np.array(v) for k, v in path.items()}
                    trajectories.append(path)
                    path = {
                        'observations': [],
                        'actions': [],
                        'rewards': [],
                        'next_observations': [],
                        'terminals': [],
                    }
            return trajectories

        trajectories = conv_txt_to_pkl(dataset_path)
    elif env_name in ['CartPole-v0']:
        import json
        dataset_path = f"./data/trajectories__{env_name}.json"
        with open(dataset_path, 'rb') as f:
            trajectories = json.load(f)
        # Convert list fields to arrays; 'rewards_sum' stays a plain scalar.
        for tr in trajectories:
            for k, v in tr.items():
                if k == 'rewards_sum':
                    continue
                tr[k] = np.array(v)

    elif env_name in ['CartPole-v0-pixel']:
        import json
        dataset_path = f"./data/trajectories__{env_name}.json"
        with open(dataset_path, 'rb') as f:
            trajectories = json.load(f)

        for tr in trajectories:
            for k, v in tr.items():
                if k == 'rewards_sum':
                    continue
                tr[k] = np.array(v)
    else:
        dataset_path = f'data/{env_name}-{dataset}-v2.pkl'
        with open(dataset_path, 'rb') as f:
            trajectories = pickle.load(f)

    # save all path information into separate lists
    mode = variant.get('mode', 'normal')
    states, traj_lens, returns = [], [], []
    for path in trajectories:
        if mode == 'delayed':  # delayed: all rewards moved to end of trajectory
            path['rewards'][-1] = path['rewards'].sum()
            path['rewards'][:-1] = 0.
        states.append(path['observations'])
        traj_lens.append(len(path['observations']))
        returns.append(path['rewards'].sum())
    traj_lens, returns = np.array(traj_lens), np.array(returns)

    # used for input normalization
    states = np.concatenate(states, axis=0)
    state_mean, state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6

    num_timesteps = sum(traj_lens)

    print('=' * 50)
    avg_returns = np.mean(returns)
    std_returns = np.std(returns)
    # When no explicit scale was configured, normalize returns-to-go by the
    # dataset's average return.
    scale = scale if scale else avg_returns

    print(f'Starting new experiment: {env_name} {dataset}')
    print(f'{len(traj_lens)} trajectories, {num_timesteps} timesteps found')
    print(f'Average return: {avg_returns:.2f}, std: {std_returns:.2f}')
    print(f'Max return: {np.max(returns):.2f}, min: {np.min(returns):.2f}')
    print('=' * 50)

    K = variant['K']
    batch_size = variant['batch_size']
    num_eval_episodes = variant['num_eval_episodes']
    pct_traj = variant.get('pct_traj', 1.)

    # only train on top pct_traj trajectories (for %BC experiment)
    num_timesteps = max(int(pct_traj * num_timesteps), 1)
    sorted_inds = np.argsort(returns)  # lowest to highest
    num_trajectories = 1
    timesteps = traj_lens[sorted_inds[-1]]
    ind = len(trajectories) - 2
    while ind >= 0 and timesteps + traj_lens[sorted_inds[ind]] <= num_timesteps:
        timesteps += traj_lens[sorted_inds[ind]]
        num_trajectories += 1
        ind -= 1
    sorted_inds = sorted_inds[-num_trajectories:]

    # used to reweight sampling so we sample according to timesteps instead of trajectories
    p_sample = traj_lens[sorted_inds] / sum(traj_lens[sorted_inds])

    def get_batch(batch_size=256, max_len=K):
        """Sample a padded batch of (s, a, r, done, rtg, t, mask) tensors.

        Trajectories are sampled proportional to their length; each sample is
        a random window of up to ``max_len`` steps, left-padded to ``max_len``.
        """
        batch_inds = np.random.choice(
            np.arange(num_trajectories),
            size=batch_size,
            replace=True,
            p=p_sample,  # reweights so we sample according to timesteps
        )

        s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], []
        for i in range(batch_size):
            traj = trajectories[int(sorted_inds[batch_inds[i]])]
            si = random.randint(0, traj['rewards'].shape[0] - 1)

            # get sequences from dataset
            s.append(traj['observations'][si:si + max_len].reshape(1, -1, state_dim))

            _a = traj['actions'][si:si + max_len]

            if is_discrete_action:
                if act_dim == 1:
                    # Actions stored as scalar indices, e.g. [0, 2, 1, 4, 1, 2].
                    a.append(_a.reshape(1, -1, 1))
                else:
                    # One-hot encode the integer actions.
                    a.append(np.eye(act_dim)[_a][np.newaxis, :])
            else:
                a.append(_a.reshape(1, -1, act_dim))

            r.append(traj['rewards'][si:si + max_len].reshape(1, -1, 1))
            if 'terminals' in traj:
                d.append(traj['terminals'][si:si + max_len].reshape(1, -1))
            else:
                d.append(traj['dones'][si:si + max_len].reshape(1, -1))
            timesteps.append(np.arange(si, si + s[-1].shape[1]).reshape(1, -1))
            timesteps[-1][timesteps[-1] >= max_ep_len] = max_ep_len - 1  # padding cutoff
            rtg.append(discount_cumsum(traj['rewards'][si:], gamma=1.)[:s[-1].shape[1] + 1].reshape(1, -1, 1))
            if rtg[-1].shape[1] <= s[-1].shape[1]:
                rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1)

            # padding and state + reward normalization
            tlen = s[-1].shape[1]
            s[-1] = np.concatenate([np.zeros((1, max_len - tlen, state_dim)), s[-1]], axis=1)
            s[-1] = (s[-1] - state_mean) / state_std
            # Pad actions with -10 so padded positions are clearly out-of-range.
            a[-1] = np.concatenate([np.ones((1, max_len - tlen, act_dim)) * -10., a[-1]], axis=1)

            r[-1] = np.concatenate([np.zeros((1, max_len - tlen, 1)), r[-1]], axis=1)
            d[-1] = np.concatenate([np.ones((1, max_len - tlen)) * 2, d[-1]], axis=1)

            # Normalize returns-to-go by the configured/derived scale.
            _rtg = np.concatenate([np.zeros((1, max_len - tlen, 1)), rtg[-1]], axis=1)
            rtg[-1] = _rtg / scale
            timesteps[-1] = np.concatenate([np.zeros((1, max_len - tlen)), timesteps[-1]], axis=1)
            mask.append(np.concatenate([np.zeros((1, max_len - tlen)), np.ones((1, tlen))], axis=1))

        s = torch.from_numpy(np.concatenate(s, axis=0)).to(dtype=torch.float32, device=device)
        a = torch.from_numpy(np.concatenate(a, axis=0)).to(dtype=torch.float32, device=device)
        r = torch.from_numpy(np.concatenate(r, axis=0)).to(dtype=torch.float32, device=device)
        d = torch.from_numpy(np.concatenate(d, axis=0)).to(dtype=torch.long, device=device)
        rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).to(dtype=torch.float32, device=device)
        timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).to(dtype=torch.long, device=device)
        mask = torch.from_numpy(np.concatenate(mask, axis=0)).to(device=device)

        return s, a, r, d, rtg, timesteps, mask

    def eval_episodes(target_rew):
        """Build an evaluation callback conditioned on `target_rew` return."""
        def fn(model):
            returns, lengths = [], []
            for _ in range(num_eval_episodes):
                with torch.no_grad():
                    if model_type == 'dt':
                        ret, length = evaluate_episode_rtg(
                            env,
                            state_dim,
                            act_dim,
                            model,
                            max_ep_len=max_ep_len,
                            scale=scale,
                            target_return=target_rew / scale,
                            mode=mode,
                            state_mean=state_mean,
                            state_std=state_std,
                            device=device,
                        )
                    else:
                        ret, length = evaluate_episode(
                            env,
                            state_dim,
                            act_dim,
                            model,
                            max_ep_len=max_ep_len,
                            target_return=target_rew / scale,
                            mode=mode,
                            state_mean=state_mean,
                            state_std=state_std,
                            device=device,
                        )
                returns.append(ret)
                lengths.append(length)
            return {
                f'target_{target_rew}_return_mean': np.mean(returns),
                f'target_{target_rew}_return_std': np.std(returns),
                f'target_{target_rew}_length_mean': np.mean(lengths),
                f'target_{target_rew}_length_std': np.std(lengths),
            }

        return fn

    if model_type == 'dt':
        model = DecisionTransformer(
            state_dim=state_dim,
            act_dim=act_dim,
            max_length=K,
            max_ep_len=max_ep_len,
            hidden_size=variant['embed_dim'],
            n_layer=variant['n_layer'],
            n_head=variant['n_head'],
            n_inner=4 * variant['embed_dim'],
            activation_function=variant['activation_function'],
            n_positions=1024,
            resid_pdrop=variant['dropout'],
            attn_pdrop=variant['dropout'],
        )
    elif model_type == 'bc':
        model = MLPBCModel(
            state_dim=state_dim,
            act_dim=act_dim,
            max_length=K,
            hidden_size=variant['embed_dim'],
            n_layer=variant['n_layer'],
        )
    else:
        raise NotImplementedError

    model = model.to(device=device)
    optimizer = torch.optim.AdamW(
        model.parameters(),
        lr=variant['learning_rate'],
        weight_decay=variant['weight_decay'],
    )

    num_steps_per_iter = variant['num_steps_per_iter']
    max_iters = variant['max_iters']
    total_steps = max_iters * num_steps_per_iter
    # Default to 1 (no warmup) when warmup_steps was not supplied.
    variant['warmup_steps'] = variant['warmup_steps'] if variant['warmup_steps'] else 1

    # Linear LR warmup: ramps from 1/warmup_steps to 1 and then stays flat.
    scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lambda steps: min((steps + 1) / variant['warmup_steps'], 1)
    )

    # region define trainer
    def loss_fn__by_discrete_action(s_hat, a_hat, r_hat, s, a, r):
        """BCE-with-logits action loss for discrete-action environments.

        State/reward prediction losses are intentionally not used.
        """
        l_a = F.binary_cross_entropy_with_logits(a_hat, a)
        return l_a

    if model_type == 'dt':
        if is_discrete_action:
            loss_fn = loss_fn__by_discrete_action
        else:
            loss_fn = lambda s_hat, a_hat, r_hat, s, a, r: F.mse_loss(a_hat, a)
        trainer = SequenceTrainer(
            model=model,
            optimizer=optimizer,
            batch_size=batch_size,
            get_batch=get_batch,
            scheduler=scheduler,
            loss_fn=loss_fn,
            eval_fns=[eval_episodes(tar) for tar in env_targets],
        )
    elif model_type == 'bc':
        if is_discrete_action:
            loss_fn = loss_fn__by_discrete_action
        else:
            # BUGFIX: the original line ended with a trailing comma, which
            # bound `loss_fn` to a 1-tuple containing the lambda instead of
            # the lambda itself, crashing ActTrainer when it called loss_fn.
            loss_fn = lambda s_hat, a_hat, r_hat, s, a, r: torch.mean((a_hat - a) ** 2)

        trainer = ActTrainer(
            model=model,
            optimizer=optimizer,
            batch_size=batch_size,
            get_batch=get_batch,
            scheduler=scheduler,
            loss_fn=loss_fn,
            eval_fns=[eval_episodes(tar) for tar in env_targets],
        )
    # endregion

    hyperparameters = "|param|value|\n|-|-|\n%s" % ("\n".join([f"|{key}|{value}|" for key, value in variant.items()]))
    print(hyperparameters)

    if log_to_wandb:
        # wandb.init/wandb.watch are disabled ("wandb has some bug" per the
        # original author); despite the flag name, logging goes to TensorBoard.
        from torch.utils.tensorboard import SummaryWriter
        import tensorboard
        log_dir = f'runs/{exp_prefix}'
        writer = SummaryWriter(log_dir)
        writer.add_text(
            "hyperparameters",
            hyperparameters,
        )

    for iteration in range(max_iters):
        iter_num = iteration + 1
        outputs = trainer.train_iteration(num_steps=num_steps_per_iter, iter_num=iter_num, print_logs=True)

        if log_to_wandb:
            for key, value in outputs.items():
                # iter_num is the current training iteration; adjust as needed.
                writer.add_scalar(key, value, global_step=iter_num)

        # NOTE(review): confirm the trainer actually emits the '_return_mean'
        # key; eval_episodes above produces 'target_{t}_return_mean' keys.
        return_mean = outputs['_return_mean']
        # Early stop once the environment's solved threshold is reached.
        if return_mean >= env.spec.reward_threshold:
            print(f'--- 第[{iter_num}]个回合的return_mean[{round(return_mean, 3)}]达到`env.spec.reward_threshold`[{env.spec.reward_threshold}]!')
            break


if __name__ == '__main__':
    # BUGFIX: `from distutils.util import strtobool` relied on distutils,
    # which is deprecated (PEP 632) and removed in Python 3.12; use a local
    # drop-in equivalent instead.
    def strtobool(val):
        """Convert a truthy/falsy string to 1/0 (distutils.util.strtobool clone)."""
        val = val.lower()
        if val in ('y', 'yes', 't', 'true', 'on', '1'):
            return 1
        if val in ('n', 'no', 'f', 'false', 'off', '0'):
            return 0
        raise ValueError("invalid truth value %r" % (val,))

    parser = argparse.ArgumentParser()

    parser.add_argument('-ep', '--exp_prefix', type=str, default='gym-experiment')

    # Other supported values: 'CartPole-v0', 'CartPole-v0-pixel'.
    parser.add_argument('--env', type=str, default='genshin-v0')

    parser.add_argument("--seed", type=int, default=1, help="seed of the experiment")

    parser.add_argument('--dataset', type=str, default='medium')  # medium, medium-replay, medium-expert, expert
    parser.add_argument('--mode', type=str, default='normal')  # normal for standard setting, delayed for sparse
    parser.add_argument('--K', type=int, default=20)
    parser.add_argument('--pct_traj', type=float, default=1.)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--model_type', type=str, default='dt')  # dt for decision transformer, bc for behavior cloning
    parser.add_argument('--embed_dim', type=int, default=128)
    parser.add_argument('--n_layer', type=int, default=3)
    parser.add_argument('--n_head', type=int, default=1)
    parser.add_argument('--activation_function', type=str, default='relu')
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--learning_rate', '-lr', type=float, default=1e-3)
    parser.add_argument('--weight_decay', '-wd', type=float, default=1e-4)
    parser.add_argument('--warmup_steps', type=int, default=None)
    parser.add_argument('--num_eval_episodes', type=int, default=10)
    parser.add_argument('--max_iters', type=int, default=100)
    parser.add_argument('--num_steps_per_iter', type=int, default=1)
    parser.add_argument('--device', type=str, default='cuda')

    parser.add_argument("--log_to_wandb", type=lambda x: bool(strtobool(x)), default=True, nargs="?", const=True,
                        help="if toggled, will log to tensorboard's log_dir")

    parser.add_argument("--remove_logdir",
                        type=lambda x: bool(strtobool(x)), default=False, nargs="?",
                        help="if toggled, will remove tensorboard's existed log_dir") # , const=True

    args = parser.parse_args()
    variant = vars(args)

    import os
    from bdtime import tt
    from tools.remove_temp_file import remove_path
    # Optionally wipe previous TensorBoard/video output before the run starts
    # (gives the user a few seconds to abort, since it deletes other runs too).
    if variant['log_to_wandb'] and variant['remove_logdir']:
        tt.tqdm_sleep(desc="****** Warning: flag__remove_dirs 将清除其它实验记录! 确定继续?", T=6)
        remove_dir_ls = ['videos', 'runs']
        assert isinstance(remove_dir_ls, list), 'remove_dir_ls必须为list类型!'
        for remove_dir in remove_dir_ls:
            if os.path.exists(remove_dir):
                remove_path(remove_dir, keep_external_folder=True)
        tt.sleep(1)

    experiment(variant=variant)
