# rl_train.py (Fully Adjusted with SymbolicManager)
import logging
import time
import json
import os
import random

import numpy as np
import torch
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter

from deadlock_env import DeadlockEnv
from net.numerical_coat_net import NumericalCoATNet
# Make sure PPOAgent is imported correctly from your project
from net.ppo_agent import PPOAgent
from util.logger_util import createLogger
from util.state_processor import process_observation_to_sequence
# Import the SymbolicManager class
from util.symbolic_manager import SymbolicManager
# The evaluation function is assumed to also accept a symbol manager
from util.evaluate import evaluate_symbolic_agent
# --- [new] Import the trace validator ---
from util.trace_validator import TraceValidator

# Make sure this path points to an SFT model trained with the new code.
SFT_MODEL_DIR = "models/sft_symbolic_20251015_164334"
logger = createLogger('Train')

# CONFIG is leaner now: it no longer hard-codes the vocabulary size
# (CONFIG['VOCAB_SIZE'] is filled in at startup from the SymbolicManager).
CONFIG = {
    "NUM_PROCESSES": 5, "NUM_RESOURCES": 3, "D_MODEL": 128, "N_HEAD": 8,
    "NUM_ENCODER_LAYERS": 3, "NUM_DECODER_LAYERS": 3,
    "MAX_TRACE_LENGTH": 25,  # maximum length of a generated symbolic trace
    "TOTAL_TIMESTEPS": 5000000, "STEPS_PER_COLLECT": 512, "NUM_WORKERS": 8,
    "LR": 1e-5, "UPDATE_EPOCHS": 5,
    "EVAL_INTERVAL": 25, "EVAL_EPISODES": 20,
    "TEMPERATURE": 1.5,  # sampling temperature, raised to encourage exploration

    # --- [new] PPO agent hyperparameters ---
    "GAMMA": 0.99,
    "GAE_LAMBDA": 0.95,
    "CLIP_EPSILON": 0.2,
    "ENTROPY_COEF": 0.02,
    "KL_BETA": 0.2,  # <--- [core change 1] KL regularization coefficient

    # --- Reward shaping (negative shaping terms added to the env reward) ---
    "REWARD_SHAPING": {
        "PENALTY_NO_DECIDE": -5,
        "PENALTY_NO_CYCLE_FOUND": -0.5,
    }
}

# Hard cap on steps per episode during evaluation only.
MAX_EVAL_STEPS_PER_EPISODE = 50


def make_env():
    """Factory: build one fresh DeadlockEnv sized from the global CONFIG."""
    sizes = {
        'num_processes': CONFIG['NUM_PROCESSES'],
        'num_resources': CONFIG['NUM_RESOURCES'],
    }
    return DeadlockEnv(**sizes)


def worker(pipe, env_fn):
    """Child-process loop: owns a single environment instance and serves
    'step' / 'reset' / 'close' commands received over `pipe`.

    On 'step' the env is auto-reset when the episode ends, so the collector
    always receives a fresh observation after `done`.
    """
    env = env_fn()
    try:
        while True:
            cmd, payload = pipe.recv()
            if cmd == 'close':
                pipe.close()
                break
            if cmd == 'reset':
                obs, mask = env.reset()
                pipe.send((obs, mask))
            elif cmd == 'step':
                obs, reward, done, info, mask = env.step(payload)
                if done:
                    # Auto-reset: replace terminal obs/mask with the new episode's.
                    obs, mask = env.reset()
                pipe.send((obs, reward, done, info, mask))
    except KeyboardInterrupt:
        pass


class SubprocVecEnv:
    """Minimal synchronous vectorized environment.

    Spawns one daemon worker process per env function; communication with
    each worker goes over its own multiprocessing Pipe.
    """

    def __init__(self, env_fns):
        self.num_envs = len(env_fns)
        pairs = [mp.Pipe() for _ in env_fns]
        self.pipes = tuple(parent for parent, _ in pairs)
        child_ends = [child for _, child in pairs]
        self.procs = []
        for child, fn in zip(child_ends, env_fns):
            proc = mp.Process(target=worker, args=(child, fn))
            proc.daemon = True
            proc.start()
            self.procs.append(proc)
        # The parent keeps only its ends; close the child ends locally.
        for child in child_ends:
            child.close()

    def reset(self):
        """Reset all envs. Returns (list of obs, list of masks)."""
        for remote in self.pipes:
            remote.send(('reset', None))
        obs, masks = zip(*(remote.recv() for remote in self.pipes))
        return list(obs), list(masks)

    def step(self, actions):
        """Step all envs in lockstep with one action each.

        Returns (obs list, rewards array, dones array, infos list, masks list).
        """
        for remote, act in zip(self.pipes, actions):
            remote.send(('step', act))
        replies = [remote.recv() for remote in self.pipes]
        obs, rewards, dones, infos, masks = zip(*replies)
        return list(obs), np.array(rewards), np.array(dones), list(infos), list(masks)

    def close(self):
        """Ask every worker to shut down, then join its process."""
        for remote in self.pipes:
            remote.send(('close', None))
        for proc in self.procs:
            proc.join()


if __name__ == '__main__':
    # 1. Create the SymbolicManager and TraceValidator instances.
    symbol_manager = SymbolicManager(
        num_processes=CONFIG['NUM_PROCESSES'],
        num_resources=CONFIG['NUM_RESOURCES']
    )
    trace_validator = TraceValidator(symbol_manager)

    # Determine the feature dimension and vocabulary size dynamically:
    # probe one observation for the feature width, ask the symbol manager
    # for the token vocabulary size.
    temp_env = make_env()
    temp_obs, _ = temp_env.reset()
    sample_seq = process_observation_to_sequence(temp_obs)
    feature_dim = sample_seq.shape[1]
    CONFIG['VOCAB_SIZE'] = symbol_manager.get_vocab_size()
    print(f"动态确定的特征维度: {feature_dim}, 词典大小: {CONFIG['VOCAB_SIZE']}")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"使用设备: {device}")

    # Per-run output directory for checkpoints, tensorboard logs and config.
    experiment_name = "rl_symbolic_fix"
    timestamp = time.strftime('%Y%m%d_%H%M%S')
    save_dir = os.path.join('models', f'{experiment_name}_{timestamp}')
    os.makedirs(save_dir, exist_ok=True)
    writer = SummaryWriter(save_dir)
    print(f"所有产物将保存至: {save_dir}")

    # 2. Initialize the vectorized environments and the model.
    env_fns = [make_env for _ in range(CONFIG['NUM_WORKERS'])]
    envs = SubprocVecEnv(env_fns)

    model = NumericalCoATNet(
        feature_dim=feature_dim,  # dynamically determined above
        vocab_size=CONFIG['VOCAB_SIZE'],
        d_model=CONFIG['D_MODEL'],
        nhead=CONFIG['N_HEAD'],
        num_encoder_layers=CONFIG['NUM_ENCODER_LAYERS'],
        num_decoder_layers=CONFIG['NUM_DECODER_LAYERS']
    ).to(device)

    # Warm-start from the SFT checkpoint; fall back to training from scratch
    # if loading fails. The SFT vocab size must match the current one.
    try:
        sft_config_path = os.path.join(SFT_MODEL_DIR, "config.json")
        with open(sft_config_path, 'r') as f:
            sft_config = json.load(f)
        if sft_config['VOCAB_SIZE'] != CONFIG['VOCAB_SIZE']:
            raise ValueError(f"SFT vocab size ({sft_config['VOCAB_SIZE']}) 与当前配置 ({CONFIG['VOCAB_SIZE']}) 不匹配!")

        sft_model_path = os.path.join(SFT_MODEL_DIR, "model.pth")
        model.load_state_dict(torch.load(sft_model_path, map_location=device))
        print(f"成功加载SFT预训练模型: {sft_model_path}")
    except Exception as e:
        print(f"加载SFT模型失败 (路径: {SFT_MODEL_DIR}): {e}。将从头开始训练。")

    # --- [core change 2] Initialize the agent with all hyperparameters from CONFIG ---
    agent = PPOAgent(
        model,
        device=device,
        lr=CONFIG['LR'],
        update_epochs=CONFIG['UPDATE_EPOCHS'],
        clip_epsilon=CONFIG['CLIP_EPSILON'],
        gamma=CONFIG['GAMMA'],
        gae_lambda=CONFIG['GAE_LAMBDA'],
        entropy_coef=CONFIG['ENTROPY_COEF'],
        kl_beta=CONFIG['KL_BETA']
    )

    # 3. Main training loop setup.
    print("\n--- 开始强化学习训练 (PPO with Symbolic CoT) ---")
    obs_list, masks_list = envs.reset()
    obs_sequences = [process_observation_to_sequence(obs) for obs in obs_list]
    num_updates = CONFIG['TOTAL_TIMESTEPS'] // CONFIG['STEPS_PER_COLLECT']

    # Per-worker running episode statistics, reset whenever an episode ends.
    episode_rewards = np.zeros(CONFIG['NUM_WORKERS'], dtype=np.float32)
    episode_lengths = np.zeros(CONFIG['NUM_WORKERS'], dtype=np.int32)


    def _is_deadlocked(obs, num_processes, num_resources):
        """一个轻量级的辅助函数，用于检查当前状态是否真的存在死锁环路。"""
        waits_for = {}
        resource_holder = {}
        for p_id in range(num_processes):
            for r_id in range(num_resources):
                if obs['allocation'][p_id, r_id] > 0:
                    resource_holder[r_id] = p_id

        blocked_pids = np.where(obs['processes_status'] == 1)[0]
        for p_id in blocked_pids:
            needed_r_ids = np.where(obs['need'][p_id] > 0)[0]
            if needed_r_ids.any():
                needed_r_id = needed_r_ids[0]
                if needed_r_id in resource_holder:
                    waits_for[p_id] = resource_holder[needed_r_id]

        path = set()
        visited = set()
        for p_id in waits_for:
            if p_id not in visited:
                node = p_id
                while node is not None and node not in visited:
                    path.add(node)
                    visited.add(node)
                    node = waits_for.get(node)
                    if node in path:
                        return True
                path.clear()
        return False


    for update_step in range(num_updates):
        start_time = time.time()
        agent.clear_buffer()
        completed_ep_rewards = []
        completed_ep_lengths = []

        # Data collection: step all workers in lockstep until the batch is full.
        for _ in range(CONFIG['STEPS_PER_COLLECT'] // CONFIG['NUM_WORKERS']):
            action_traces, log_probs, values = agent.select_action(
                obs_sequences,
                max_len=CONFIG['MAX_TRACE_LENGTH'],
                symbol_manager=symbol_manager,
                temperature=CONFIG['TEMPERATURE']
            )

            action_dicts = []
            trace_rewards = np.zeros(CONFIG['NUM_WORKERS'], dtype=np.float32)
            rs_config = CONFIG["REWARD_SHAPING"]

            for i, trace in enumerate(action_traces):
                # Logic-consistency reward from the trace validator.
                logic_reward = trace_validator.validate_and_reward(trace.cpu().numpy(), obs_list[i])
                trace_rewards[i] += logic_reward

                action_dict = symbol_manager.parse_action_from_trace(trace.cpu().numpy())
                if action_dict is None:
                    # The trace never produced a decision: penalize it and fall
                    # back to a random rollback so the env can still advance.
                    trace_rewards[i] += rs_config["PENALTY_NO_DECIDE"]
                    action_dict = {'type': 'rollback', 'process_id': random.randint(0, CONFIG['NUM_PROCESSES'] - 1)}
                else:
                    logger.info(action_dict)
                # Extra penalty when a genuine deadlock exists but the trace
                # never emitted the CYCLE_FOUND token.
                has_deadlock = _is_deadlocked(obs_list[i], CONFIG['NUM_PROCESSES'], CONFIG['NUM_RESOURCES'])
                found_cycle_token = symbol_manager.CYCLE_FOUND in trace.cpu().numpy()

                if has_deadlock and not found_cycle_token:
                    trace_rewards[i] += rs_config["PENALTY_NO_CYCLE_FOUND"]

                action_dicts.append(action_dict)

            next_obs_list, env_rewards, dones, _, next_masks_list = envs.step(action_dicts)
            rewards = env_rewards + trace_rewards
            next_obs_sequences = [process_observation_to_sequence(obs) for obs in next_obs_list]

            # Store one transition per worker in the PPO buffer.
            for i in range(CONFIG['NUM_WORKERS']):
                agent.buffer["states"].append(obs_sequences[i])
                agent.buffer["actions"].append(action_traces[i])
                agent.buffer["log_probs"].append(log_probs[i])
                agent.buffer["rewards"].append(rewards[i])
                agent.buffer["values"].append(values[i])
                agent.buffer["dones"].append(dones[i])

            # BUGFIX: also carry the raw observations forward. Previously only
            # obs_sequences/masks_list were updated, so validate_and_reward()
            # and _is_deadlocked() kept scoring every step against the stale
            # observations from the initial envs.reset().
            obs_list = next_obs_list
            obs_sequences = next_obs_sequences
            masks_list = next_masks_list
            episode_rewards += rewards
            episode_lengths += 1

            # Episode bookkeeping (workers auto-reset when done).
            for i in range(CONFIG['NUM_WORKERS']):
                if dones[i]:
                    completed_ep_rewards.append(episode_rewards[i])
                    completed_ep_lengths.append(episode_lengths[i])
                    episode_rewards[i] = 0
                    episode_lengths[i] = 0

        stats = agent.update()
        end_time = time.time()
        sps = int(CONFIG['STEPS_PER_COLLECT'] / (end_time - start_time))
        current_timestep = (update_step + 1) * CONFIG['STEPS_PER_COLLECT']

        if stats:
            avg_ep_reward = np.mean(completed_ep_rewards) if completed_ep_rewards else 0.0
            avg_ep_length = np.mean(completed_ep_lengths) if completed_ep_lengths else 0.0

            # --- [core change 3] console/tensorboard logging of update stats ---
            print(
                f"Timestep: {current_timestep}/{CONFIG['TOTAL_TIMESTEPS']} | SPS: {sps} | "
                f"Reward: {avg_ep_reward:.2f} | Len: {avg_ep_length:.1f} | "
                f"Actor Loss: {stats['actor_loss']:.3f} | Critic Loss: {stats['critic_loss']:.3f} | "
                f"Entropy: {stats['entropy']:.3f}"
            )

            writer.add_scalar('Performance/Steps_Per_Second', sps, current_timestep)
            if completed_ep_rewards:
                writer.add_scalar('Reward/Episode_Reward', avg_ep_reward, current_timestep)
                writer.add_scalar('Episode/Average_Length', avg_ep_length, current_timestep)
            writer.add_scalar('Loss/Actor_Loss', stats['actor_loss'], current_timestep)
            writer.add_scalar('Loss/Critic_Loss', stats['critic_loss'], current_timestep)
            writer.add_scalar('Policy/Entropy', stats['entropy'], current_timestep)
            # NOTE(review): enable once agent.update() reports a 'kl_div' stat:
            # writer.add_scalar('Policy/KL_Divergence', stats['kl_div'], current_timestep)

        if (update_step + 1) % CONFIG['EVAL_INTERVAL'] == 0:
            print(f"\n--- [评估点] Timestep: {current_timestep} ---")
            with torch.no_grad():
                eval_success, eval_length, eval_reward = evaluate_symbolic_agent(
                    model=model,
                    config=CONFIG,
                    num_episodes=CONFIG['EVAL_EPISODES'],
                    max_steps_per_episode=MAX_EVAL_STEPS_PER_EPISODE,
                    device=device,
                    symbol_manager=symbol_manager
                )

            print(f"评估完成 - 成功率: {eval_success:.2f}% | 平均步数: {eval_length:.2f} | 平均奖励: {eval_reward:.3f}")
            writer.add_scalar('Evaluation/Success_Rate', eval_success, current_timestep)
            writer.add_scalar('Evaluation/Average_Length', eval_length, current_timestep)
            writer.add_scalar('Evaluation/Average_Reward', eval_reward, current_timestep)

            # Snapshot the model at every evaluation point.
            eval_model_path = os.path.join(save_dir, f"model_step_{current_timestep}.pth")
            torch.save(model.state_dict(), eval_model_path)
            print(f"评估后模型已保存至: {eval_model_path}")

    envs.close()
    writer.close()
    print("\n--- 强化学习训练完成 ---")

    final_model_path = os.path.join(save_dir, "model_final.pth")
    torch.save(model.state_dict(), final_model_path)
    print(f"最终模型已保存至: {final_model_path}")

    # Persist the effective config (including the runtime VOCAB_SIZE).
    with open(os.path.join(save_dir, "config.json"), 'w') as f:
        json.dump(CONFIG, f, indent=4)
    print("配置已保存。")
