import json
import os
import random
import time
from collections import defaultdict

import jax
import numpy as np
import tqdm
import wandb

from absl import app, flags
from agents import agents
from ml_collections import config_flags

# ---------- datasets ----------
from utils.datasets import (
    Dataset, GCDataset, HGCDataset,
    KeyJsonGCDataset, KeyJsonHGCDataset,     # 已有
    RandStitchGCDataset, RandStitchHGCDataset,
    ClusterStitchGCDataset, ClusterStitchHGCDataset,
    DecoupledKeyJsonGCDataset, DecoupledKeyJsonHGCDataset,
    # New: LGDA (language-rule cross-trajectory stitching + optional keystate)
    LGDAGCDataset, LGDAHGCDataset
)

from utils.env_utils import make_env_and_datasets
from utils.evaluation import evaluate
from utils.flax_utils import restore_agent, save_agent
from utils.log_utils import CsvLogger, get_exp_name, get_flag_dict, get_wandb_video, setup_wandb

FLAGS = flags.FLAGS

# ---------------- basic flags ----------------
flags.DEFINE_string('run_group', 'Debug', 'Run group.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_string('env_name', 'antmaze-large-navigate-v0', 'Environment (dataset) name.')
flags.DEFINE_string('save_dir', 'exp/', 'Save directory.')
flags.DEFINE_string('restore_path', None, 'Restore path.')
flags.DEFINE_integer('restore_epoch', None, 'Restore epoch.')

flags.DEFINE_integer('train_steps', 1_000_000, 'Number of training steps.')
flags.DEFINE_integer('log_interval', 5_000, 'Logging interval.')
flags.DEFINE_integer('eval_interval', 100000, 'Evaluation interval.')
flags.DEFINE_integer('save_interval', 1_000_000, 'Saving interval.')

flags.DEFINE_integer('eval_tasks', None, 'Number of tasks to evaluate (None for all).')
flags.DEFINE_integer('eval_episodes', 20, 'Number of episodes for each task.')
flags.DEFINE_float('eval_temperature', 0, 'Actor temperature for evaluation.')
flags.DEFINE_float('eval_gaussian', None, 'Action Gaussian noise for evaluation.')
flags.DEFINE_integer('video_episodes', 1, 'Number of video episodes for each task.')
flags.DEFINE_integer('video_frame_skip', 3, 'Frame skip for videos.')
flags.DEFINE_integer('eval_on_cpu', 1, 'Whether to evaluate on CPU.')

# ---------------- agent config flag ----------------
config_flags.DEFINE_config_file('agent', 'agents/gciql.py', lock_config=False)

# ---------------- LLM key-state relabel flags ----------------
# NOTE(review): llm_json defaults to a machine-specific absolute path — confirm
# it is overridden on other hosts.
flags.DEFINE_bool('llm', False, 'Use JSON key-state relabel dataset')
flags.DEFINE_string('llm_json', '/home/gxl/code0717/ogbench/key_states/antsoccer.json', 'Path to key-state JSON file')
flags.DEFINE_string('llm_name', 'ks_any', 'Which key-state to use as future goal (e.g. ks_any, ks_state_2)')
flags.DEFINE_float('llm_prob', 0.2, 'Probability to replace future goal with key-state')
flags.DEFINE_integer('llm_horizon', 15, 'Max forward steps to seek key-state; exceed → fallback to original rule')

# ---------------- data-augmentation mode ----------------
flags.DEFINE_string(
    'gda_mode', 's1',
    "Goal-data-augmentation mode: "
    "'s1'(base), 'sgda', 'tgda', 'lgda', 'llm'(DecoupledKeyJson*)"
)

# ---------------- probability warm-up (optional, for LGDA & KeyState) ----------------
flags.DEFINE_float('lgda_p_start', 0.05, 'initial stitch_prob for LGDA')
flags.DEFINE_float('lgda_p_end',   0.5,  'final stitch_prob for LGDA')
flags.DEFINE_float('lgda_p_warmup_ratio', 0.3, 'fraction of train_steps to ramp stitch_prob')

# (Optional) also ramp the keystate probability; when unset, the constant llm_prob is used.
# NOTE(review): these defaults are non-None, so the `is not None` fallback in the
# training loop never fires and ks_prob is always ramped — confirm that is intended.
flags.DEFINE_float('llm_prob_start', 0.05, 'optional: ramp start for ks_prob (default: use llm_prob)')
flags.DEFINE_float('llm_prob_end',   0.5, 'optional: ramp end for ks_prob (default: use llm_prob)')
flags.DEFINE_float('llm_prob_warmup_ratio', 0.3, 'fraction of train_steps to ramp ks_prob')

# ---------------------------------------------------------------------------
def _maybe_disable_wandb():
    """Monkey-patch wandb with no-op stubs when WANDB_MODE=disabled.

    Afterwards the rest of the script can call ``wandb.init``/``log``/
    ``finish`` unconditionally without any network or disk activity.
    """
    if os.getenv("WANDB_MODE", "disabled").lower() != "disabled":
        return

    class _StubRun:
        # Minimal surface used elsewhere in this script.
        project = "OGBench"
        id = "disabled"

        def log(self, *args, **kwargs):
            pass

        def finish(self, *args, **kwargs):
            pass

    def _stub_init(*args, **kwargs):
        run = _StubRun()
        wandb.run = run
        return run

    wandb.init = _stub_init
    wandb.log = lambda *args, **kwargs: None
    wandb.finish = lambda *args, **kwargs: None
    os.environ["WANDB_SILENT"] = "true"

def _linear_ramp(start, end, step, total, ratio):
    """Linear warm-up schedule.

    Interpolates from ``start`` to ``end`` over the first ``total * ratio``
    steps, then holds ``end``.  A ``ratio`` of None or <= 0 skips the ramp
    and returns ``end`` immediately.
    """
    if ratio is None or ratio <= 0:
        return float(end)
    warmup_steps = max(1, int(total * ratio))
    frac = min(1.0, step / warmup_steps)
    return float(start + (end - start) * frac)

# ---------------------------------------------------------------------------
def main(_):
    """Train a goal-conditioned agent with optional key-state relabeling and
    cross-trajectory goal-data augmentation (controlled by --gda_mode / --llm).

    Flow: wandb + save-dir setup → env/dataset construction → agent init →
    training loop with probability warm-up, periodic logging, evaluation,
    and checkpointing.
    """
    _maybe_disable_wandb()

    # ---------------- logging paths ----------------
    exp_name = get_exp_name(FLAGS.seed)
    setup_wandb(project='OGBench', group=FLAGS.run_group, name=exp_name)

    FLAGS.save_dir = os.path.join(FLAGS.save_dir, wandb.run.project, FLAGS.run_group, exp_name)
    os.makedirs(FLAGS.save_dir, exist_ok=True)
    with open(os.path.join(FLAGS.save_dir, 'flags.json'), 'w') as f:
        json.dump(get_flag_dict(), f)

    # ---------------- env & raw dataset ----------------
    cfg = FLAGS.agent
    # NOTE(review): dataset_dir is a hard-coded machine-specific path — consider
    # promoting it to a flag.
    env, train_raw, val_raw = make_env_and_datasets(
        FLAGS.env_name, frame_stack=cfg['frame_stack'], dataset_dir='/data/gxl/ogbench/data'
    )

    # ---------------- choose concrete dataset class ----------------
    # Pick BaseCls from --llm first (this only affects the base/'s1' and 'llm'
    # paths; the 'sgda'/'tgda' branches below replace it entirely).
    if cfg['dataset_class'] == 'GCDataset':
        BaseCls = KeyJsonGCDataset if FLAGS.llm else GCDataset
    elif cfg['dataset_class'] == 'HGCDataset':
        BaseCls = KeyJsonHGCDataset if FLAGS.llm else HGCDataset
    else:
        raise ValueError(f"Unknown dataset_class {cfg['dataset_class']}")

    # Then wrap according to --gda_mode.
    if   FLAGS.gda_mode == 's1':   # base (no augmentation wrapper)
        DatasetCls = BaseCls
    elif FLAGS.gda_mode == 'sgda':
        DatasetCls = RandStitchGCDataset if issubclass(BaseCls, GCDataset) else RandStitchHGCDataset
    elif FLAGS.gda_mode == 'tgda':
        DatasetCls = ClusterStitchGCDataset if issubclass(BaseCls, GCDataset) else ClusterStitchHGCDataset
    elif FLAGS.gda_mode == 'llm':  # legacy "decoupled" mode (kept for compatibility)
        if cfg['dataset_class'] == 'HGCDataset':
            DatasetCls = DecoupledKeyJsonHGCDataset
        else:
            DatasetCls = DecoupledKeyJsonGCDataset
    elif FLAGS.gda_mode == 'lgda': # language-rule cross-trajectory stitching + optional keystate (on by default)
        DatasetCls = LGDAGCDataset if cfg['dataset_class'] == 'GCDataset' else LGDAHGCDataset
    else:
        raise ValueError(f"Unknown gda_mode {FLAGS.gda_mode}")

    # ---------------- build datasets ----------------
    def _build_kwargs_for_keystate(default_use_keystate: bool):
        """Return kwargs that enable key-state goal relabeling, or {} if disabled."""
        # lgda enables keystate by default; for the other modes --llm decides.
        use_ks = default_use_keystate if FLAGS.gda_mode == 'lgda' else FLAGS.llm
        if not use_ks:
            return {}
        return dict(
            json_path     = FLAGS.llm_json,
            keystate_name = FLAGS.llm_name,
            ks_prob       = FLAGS.llm_prob,
            ks_horizon    = FLAGS.llm_horizon,
            use_keystate  = True,
        )

    def _build_kwargs_for_lgda():
        """Return LGDA-only kwargs (stitcher JSON + initial stitch prob), or {}."""
        # Only lgda needs these: the initial stitch_prob starts at lgda_p_start.
        if FLAGS.gda_mode != 'lgda':
            return {}
        return dict(
            json_path    = FLAGS.llm_json,     # LGDA also reads stitchers from the JSON
            stitch_prob  = FLAGS.lgda_p_start  # initial probability; ramped up during training
        )

    if FLAGS.gda_mode == 'lgda':
        # lgda needs both keystate kwargs (on by default) and stitcher kwargs.
        ks_kwargs  = _build_kwargs_for_keystate(default_use_keystate=True)
        lg_kwargs  = _build_kwargs_for_lgda()
        build_args = {**ks_kwargs, **lg_kwargs}
        train_dataset = DatasetCls(Dataset.create(**train_raw), cfg, **build_args)
        val_dataset = (DatasetCls(Dataset.create(**val_raw), cfg, **build_args)
                       if val_raw is not None else None)
    else:
        # Other modes: keystate on/off follows --llm.
        ks_kwargs = _build_kwargs_for_keystate(default_use_keystate=False)
        train_dataset = DatasetCls(Dataset.create(**train_raw), cfg, **ks_kwargs)
        val_dataset   = (DatasetCls(Dataset.create(**val_raw),   cfg, **ks_kwargs)
                         if val_raw is not None else None)

    # ---------------- agent init ----------------
    random.seed(FLAGS.seed); np.random.seed(FLAGS.seed)
    example = train_dataset.sample(1)
    if cfg['discrete']:
        # Fill the example with the maximum discrete action index so the agent
        # is initialized against the full action range.
        example['actions'][:] = env.action_space.n - 1
    agent = agents[cfg['agent_name']].create(
        FLAGS.seed, example['observations'], example['actions'], cfg
    )
    if FLAGS.restore_path:  # resume
        agent = restore_agent(agent, FLAGS.restore_path, FLAGS.restore_epoch)

    # ---------------- choose log directory & suffix ----------------
    log_root = os.path.join("logs2", FLAGS.env_name)
    os.makedirs(log_root, exist_ok=True)

    suffix = "_llm" if FLAGS.llm else "_base0"      # records whether key-state is enabled (note: lgda also enables it internally, but the filename still follows --llm)
    algo   = FLAGS.agent['agent_name']              # algorithm name
    mode   = FLAGS.gda_mode                         # data-augmentation mode
    exp    = exp_name                               # experiment id for this run

    fname_base = f"{algo}_{mode}_{exp}{suffix}"

    train_logger = CsvLogger(os.path.join(
        log_root, f"{fname_base}_train.csv"))
    eval_logger  = CsvLogger(os.path.join(
        log_root, f"{fname_base}_eval.csv"))

    # ---------------- training loop ----------------
    tic_total = tic_last = time.time()

    for step in tqdm.tqdm(range(1, FLAGS.train_steps + 1), smoothing=0.1, dynamic_ncols=True):
        # -- probability warm-up (training set only; validation set keeps constants) --
        if FLAGS.gda_mode == 'lgda':
            if hasattr(train_dataset, 'stitch_prob'):
                train_dataset.stitch_prob = _linear_ramp(
                    FLAGS.lgda_p_start, FLAGS.lgda_p_end, step, FLAGS.train_steps, FLAGS.lgda_p_warmup_ratio
                )
        # keystate probability (optionally ramped; meant to stay at the constant
        # llm_prob when start/end are unset).
        # NOTE(review): llm_prob_start/llm_prob_end default to 0.05/0.5, not None,
        # so the constant-llm_prob fallback below never fires unless those flags
        # are explicitly set to None — confirm intent.
        if getattr(train_dataset, 'use_keystate', False) and hasattr(train_dataset, 'ks_prob'):
            if FLAGS.llm_prob_start is not None and FLAGS.llm_prob_end is not None:
                kp0, kp1 = FLAGS.llm_prob_start, FLAGS.llm_prob_end
            else:
                kp0 = kp1 = FLAGS.llm_prob
            train_dataset.ks_prob = _linear_ramp(
                kp0, kp1, step, FLAGS.train_steps, FLAGS.llm_prob_warmup_ratio
            )

        # -- sample batch & update --
        batch = train_dataset.sample(cfg['batch_size'])
        agent, info = agent.update(batch)

        # ----------- logging -----------
        if step % FLAGS.log_interval == 0:
            metrics = {f'training/{k}': v for k, v in info.items()}
            # Record the current probabilities (when the dataset exposes them).
            if hasattr(train_dataset, 'stitch_prob'):
                metrics['training/stitch_prob'] = float(train_dataset.stitch_prob)
            if hasattr(train_dataset, 'ks_prob'):
                metrics['training/ks_prob'] = float(train_dataset.ks_prob)

            if val_dataset is not None:
                val_batch = val_dataset.sample(cfg['batch_size'])
                _, val_info = agent.total_loss(val_batch, grad_params=None)
                metrics.update({f'validation/{k}': v for k, v in val_info.items()})
            metrics['time/epoch_time'] = (time.time() - tic_last) / FLAGS.log_interval
            metrics['time/total_time'] = time.time() - tic_total
            tic_last = time.time()
            wandb.log(metrics, step=step)
            train_logger.log(metrics, step=step)

        # ----------- evaluation -----------
        if step == 1 or step % FLAGS.eval_interval == 0:
            # Optionally move the agent to CPU so evaluation does not hold device memory.
            eval_agent = (jax.device_put(agent, device=jax.devices('cpu')[0])
                          if FLAGS.eval_on_cpu else agent)
            renders, eval_metrics, overall = [], {}, defaultdict(list)
            tasks = env.unwrapped.task_infos if hasattr(env.unwrapped, 'task_infos') else env.task_infos
            num_tasks = FLAGS.eval_tasks or len(tasks)
            for t_id in tqdm.trange(1, num_tasks + 1):
                name = tasks[t_id - 1]['task_name']
                ei, _, vr = evaluate(
                    eval_agent, env, task_id=t_id, config=cfg,
                    num_eval_episodes=FLAGS.eval_episodes,
                    num_video_episodes=FLAGS.video_episodes,
                    video_frame_skip=FLAGS.video_frame_skip,
                    eval_temperature=FLAGS.eval_temperature,
                    eval_gaussian=FLAGS.eval_gaussian,
                )
                renders.extend(vr)
                for k in ['success']:
                    eval_metrics[f'evaluation/{name}_{k}'] = ei[k]
                    overall[k].append(ei[k])
            for k, vs in overall.items():
                eval_metrics[f'evaluation/overall_{k}'] = np.mean(vs)
            # Skip video logging when there are no rendered frames.
            if FLAGS.video_episodes > 0 and renders:
                video = get_wandb_video(renders, n_cols=num_tasks)
                eval_metrics['video'] = video

            wandb.log(eval_metrics, step=step)
            eval_logger.log(eval_metrics, step=step)

        # ----------- save -----------
        if step % FLAGS.save_interval == 0:
            save_agent(agent, FLAGS.save_dir, step)

    train_logger.close(); eval_logger.close()


if __name__ == '__main__':
    # Entry point: absl parses the command-line flags, then invokes main().
    app.run(main)
