#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
###########################################################################
# Copyright © 1998 - 2025 Tencent. All Rights Reserved.
###########################################################################
"""
Author: Tencent AI Arena Authors
"""

import numpy as np
import time
import os
import torch
from kaiwu_agent.utils.common_func import Frame, attached

from tools.train_env_conf_validate import read_usr_conf
from agent_ppo.feature.definition import SampleManager
from tools.metrics_utils import get_training_metrics
from agent_ppo.conf.conf import Config


@attached
def workflow(envs, agents, logger=None, monitor=None):
    """Main PPO training loop.

    Repeatedly runs episodes, trains the agent on the collected samples,
    periodically saves the model (platform + local checkpoint), and reports
    monitoring metrics.

    Args:
        envs: List of environments; only the first is used.
        agents: List of agents; only the first is used.
        logger: Platform logger (assumed non-None in practice; used throughout).
        monitor: Optional metrics sink with a ``put_data`` method; may be None.

    Raises:
        RuntimeError: On any unrecoverable error (original exception chained).
    """
    try:
        env, agent = envs[0], agents[0]
        episode_num_every_epoch = 1
        last_save_model_time = 0
        last_put_data_time = 0
        monitor_data = {}

        # Local checkpoint settings
        local_ckpt_dir = "/data/projects/back_to_the_realm_v2/ckpt"
        save_interval = 1800  # 30 minutes in seconds

        # Ensure local checkpoint directory exists (training machine only;
        # silently no-ops on the platform where the path is not writable)
        _ensure_local_checkpoint_dir(local_ckpt_dir, logger)

        # Read and validate configuration file
        usr_conf = read_usr_conf("agent_ppo/conf/train_env_conf.toml", logger)
        if usr_conf is None:
            logger.error("usr_conf is None, please check agent_ppo/conf/train_env_conf.toml")
            return

        while True:
            for g_data, monitor_data in run_episodes(episode_num_every_epoch, env, agent, usr_conf, logger, monitor):
                # Train the agent on the episode's samples, then release them
                agent.learn(g_data)
                g_data.clear()

            now = time.time()

            # Save both platform and local models every 30 minutes
            if now - last_save_model_time >= save_interval:
                # Platform model saving
                agent.save_model()  # Uses platform wrapper
                logger.info("[PLATFORM] Model saved via platform system")

                # Local checkpoint saving (best-effort on training machine)
                _save_local_checkpoint(agent, local_ckpt_dir, logger)

                last_save_model_time = now

            # Report monitoring metrics at most once a minute.
            # BUGFIX: monitor may be None (default argument) — guard before use.
            if monitor is not None and now - last_put_data_time >= 60:
                # Add training step to monitoring data
                monitor_data.update({
                    "training_step": getattr(agent.algorithm, 'training_step', 0),
                    "learning_rate": getattr(agent.algorithm, 'lr', 0),
                    "var_beta": getattr(agent.algorithm, 'var_beta', 0),
                })
                monitor.put_data({os.getpid(): monitor_data})
                last_put_data_time = now

    except Exception as e:
        logger.error(f"workflow error: {e}")
        # Chain the original exception so the traceback is not lost
        raise RuntimeError("workflow error") from e


def _ensure_local_checkpoint_dir(local_ckpt_dir, logger):
    """Ensure local checkpoint directory exists (only on training machine)"""
    try:
        if not os.path.exists(local_ckpt_dir):
            os.makedirs(local_ckpt_dir, exist_ok=True)
            logger.info(f"[LOCAL] Created checkpoint directory: {local_ckpt_dir}")
    except Exception:
        # Silently fail on platform - this is expected
        pass


def _save_local_checkpoint(agent, local_ckpt_dir, logger):
    """Persist the full training state to a timestamped local .pth file.

    Captures model/optimizer weights, hyperparameters (falling back to
    Config defaults), win history, and the preprocessor's exploration state,
    then prunes old checkpoints. Failures are logged only on the training
    machine (detected via /data/projects) and never propagate.
    """
    try:
        step = getattr(agent.algorithm, 'training_step', 0)
        stamp = int(time.time())
        out_path = f"{local_ckpt_dir}/training_checkpoint_step_{step}_{stamp}.pth"

        # Snapshot the preprocessor's per-game exploration/progress counters.
        pre = agent.preprocessor
        preproc_state = {
            'discovered_treasures': getattr(pre, 'discovered_treasures', {}),
            'discovered_buffs': getattr(pre, 'discovered_buffs', {}),
            'discovered_endpoints': getattr(pre, 'discovered_endpoints', {}),
            'step_count': getattr(pre, 'step_count', 0),
            'previous_total_score': getattr(pre, 'previous_total_score', 0.0),
            'previous_treasure_count': getattr(pre, 'previous_treasure_count', 0),
            'no_movement_count': getattr(pre, 'no_movement_count', 0),
            'previous_hero_pos': getattr(pre, 'previous_hero_pos', None),
        }

        # Full training state: weights, optimizer, hyperparameters, history.
        state = {
            'training_step': step,
            'model_state_dict': agent.algorithm.model.state_dict(),
            'actor_state_dict': agent.model.state_dict(),
            'optimizer_state_dict': agent.algorithm.optimizer.state_dict(),
            'lr': getattr(agent.algorithm, 'lr', Config.START_LR),
            'var_beta': getattr(agent.algorithm, 'var_beta', Config.BETA_START),
            'clip_param': getattr(agent.algorithm, 'clip_param', Config.CLIP_PARAM),
            'vf_coef': getattr(agent.algorithm, 'vf_coef', Config.VF_COEF),
            'win_history': getattr(agent, 'win_history', []),
            'preprocessor_state': preproc_state,
            'timestamp': stamp,
        }

        torch.save(state, out_path)
        logger.info(f"[LOCAL] Saved training checkpoint at step {step}: {out_path}")

        # Keep only the most recent checkpoints on disk
        _cleanup_local_checkpoints(local_ckpt_dir, logger)

    except Exception as e:
        # Only log on the training machine; on the platform this is expected
        if os.path.exists("/data/projects"):
            logger.warning(f"[LOCAL] Failed to save checkpoint: {e}")


def _cleanup_local_checkpoints(local_ckpt_dir, logger, keep_last=5):
    """Keep only the last N local checkpoints"""
    try:
        checkpoint_files = []
        for f in os.listdir(local_ckpt_dir):
            if f.startswith("training_checkpoint_step_") and f.endswith(".pth"):
                try:
                    # Extract timestamp from filename for sorting
                    parts = f.replace(".pth", "").split("_")
                    if len(parts) >= 4:
                        timestamp = int(parts[-1])
                        checkpoint_files.append((timestamp, f))
                except ValueError:
                    continue
        
        # Sort by timestamp and keep only the most recent
        checkpoint_files.sort()
        if len(checkpoint_files) > keep_last:
            files_to_remove = checkpoint_files[:-keep_last]
            removed_count = 0
            for _, filename in files_to_remove:
                try:
                    os.remove(os.path.join(local_ckpt_dir, filename))
                    removed_count += 1
                except OSError:
                    pass
            
            if removed_count > 0:
                logger.info(f"[LOCAL] Cleaned up {removed_count} old checkpoints")
                
    except Exception:
        pass  # Silently fail


def run_episodes(n_episode, env, agent, usr_conf, logger, monitor):
    """Generator that plays up to *n_episode* games and yields training data.

    For each finished episode, yields ``(game_data, monitor_data)`` where
    ``game_data`` comes from the SampleManager and ``monitor_data`` is a dict
    of metrics (empty when *monitor* is None).

    Args:
        n_episode: Number of episodes to run.
        env: Environment with gym-like ``reset``/``step``.
        agent: Agent providing observation/action processing and prediction.
        usr_conf: Parsed training environment configuration.
        logger: Platform logger.
        monitor: Optional metrics sink; may be None.

    Raises:
        RuntimeError: On any unrecoverable error (original exception chained).
    """
    try:
        for episode in range(n_episode):
            collector = SampleManager()
            win_rate = 0
            # BUGFIX: initialize up-front so the yield at episode end never
            # raises NameError when monitor is None (previously only assigned
            # inside `if monitor:`).
            monitor_data = {}

            # Retrieving training metrics
            training_metrics = get_training_metrics()
            if training_metrics:
                logger.info(f"training_metrics is {training_metrics}")

            # Reset the task and get the initial state
            obs, extra_info = env.reset(usr_conf=usr_conf)
            if extra_info["result_code"] < 0:
                logger.error(
                    f"env.reset result_code is {extra_info['result_code']}, result_message is {extra_info['result_message']}"
                )
                raise RuntimeError(extra_info["result_message"])
            elif extra_info["result_code"] > 0:
                # Non-fatal reset result: skip this episode
                continue

            # At the start of each game, support loading the latest model file
            agent.reset()
            agent.load_model(id="latest")

            done = False
            step = 0
            # Custom metric slots reported to the monitor (diy_1 is replaced
            # by win_rate at report time)
            diy_1 = 0
            diy_2 = 0
            diy_3 = 0
            diy_4 = 0
            diy_5 = 0

            # Optional hard cap on episode length (0 = unlimited)
            max_step_no = int(os.environ.get("max_step_no", "0"))

            while not done:
                # Feature processing
                obs_data = agent.observation_process(obs, extra_info)

                # Agent performs inference
                act_data, model_version = agent.predict(list_obs_data=[obs_data])

                # Unpack ActData into action
                act = agent.action_process(act_data[0])

                # Interact with the environment
                step_no, _obs, terminated, truncated, _extra_info = env.step(act)
                if _extra_info["result_code"] != 0:
                    logger.warning(
                        f"_extra_info.result_code is {_extra_info['result_code']}, \
                        _extra_info.result_message is {_extra_info['result_message']}"
                    )
                    break

                step += 1
                reward = obs_data.reward

                # Construct task frames
                collector.sample_process(
                    feature=obs_data.feature,
                    legal_action=obs_data.legal_action,
                    prob=[act_data[0].prob],
                    action=[act_data[0].action],
                    value=act_data[0].value,
                    reward=np.array(reward),
                )

                # Determine task over and assign the terminal reward
                game_info = _extra_info["game_info"]
                final_reward = 0
                if truncated:
                    win_rate = agent.update_win_rate(False)
                    final_reward = -3
                    logger.info(
                        f"Game truncated! step_no:{step_no} score:{game_info['total_score']} win_rate:{win_rate}"
                    )
                elif terminated:
                    win_rate = agent.update_win_rate(True)
                    final_reward = 10
                    logger.info(
                        f"Game terminated! step_no:{step_no} score:{game_info['total_score']} win_rate:{win_rate}"
                    )
                done = terminated or truncated or (max_step_no > 0 and step >= max_step_no)

                # If the task is over, process samples and yield the episode
                if done:
                    if monitor:
                        monitor_data = {
                            "diy_1": win_rate,
                            "diy_2": diy_2,
                            "diy_3": diy_3,
                            "diy_4": diy_4,
                            "diy_5": diy_5,
                        }
                    collector.process_last_frame(np.array([final_reward]))
                    if len(collector.samples) > 0:
                        yield collector.get_game_data(), monitor_data
                    break

                # Status update
                obs = _obs
                extra_info = _extra_info

    except Exception as e:
        logger.error(f"run_episodes error: {e}")
        # Chain the original exception so the traceback is not lost
        raise RuntimeError("run_episodes error") from e