#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
###########################################################################
# Copyright © 1998 - 2025 Tencent. All Rights Reserved.
###########################################################################
"""
Author: Tencent AI Arena Authors
"""

import torch
import os
import random
import numpy as np
from kaiwu_agent.agent.base_agent import (
    predict_wrapper,
    exploit_wrapper,
    learn_wrapper,
    save_model_wrapper,
    load_model_wrapper,
    BaseAgent,
)
from kaiwu_agent.utils.common_func import attached
from agent_ppo.model.model import NetworkModelActor
from agent_ppo.algorithm.algorithm import Algorithm
from agent_ppo.feature.definition import SampleData, ObsData, ActData, SampleManager
from agent_ppo.feature.preprocessor import Preprocessor
from agent_ppo.conf.conf import Config


def random_choice(p):
    """Sample an index with probability proportional to the weights in ``p``.

    Draws a threshold uniformly in [0, sum(p)) and returns the first index
    whose cumulative-weight interval contains it, together with that weight.
    Falls back to the last entry when no interval matches (e.g. the
    measure-zero case where the threshold is exactly 0).

    Returns:
        (index, weight) tuple.
    """
    threshold = random.random() * sum(p)
    cumulative = 0.0
    for index, weight in enumerate(p):
        upper = cumulative + weight
        if cumulative < threshold <= upper:
            return index, weight
        cumulative = upper
    return len(p) - 1, p[-1]


@attached
class Agent(BaseAgent):
    """PPO agent for the Kaiwu platform.

    Holds two copies of the network:
      - ``self.model``: lightweight actor (``NetworkModelActor``) used only
        for inference in ``predict`` / ``exploit``;
      - ``self.algorithm.model``: learner model owned by ``Algorithm`` and
        updated in ``learn``.
    ``_sync_models`` copies learner weights into the actor after training
    steps and after checkpoint loading.
    """

    def __init__(self, agent_type="player", device=None, logger=None, monitor=None):
        super().__init__(agent_type, device, logger, monitor)

        self.device = device
        self.model = NetworkModelActor()  # Actor model for inference
        self.algorithm = Algorithm(device=device, logger=logger, monitor=monitor)  # Contains learner model
        self.preprocessor = Preprocessor()
        self.sample_manager = SampleManager()
        self.win_history = []  # sliding window of recent episode results
        self.logger = logger

        # Load pre-trained model if specified
        if Config.LOAD_MODEL_ID:
            self.logger.info(f"Load pre-trained model ID: {Config.LOAD_MODEL_ID}")
            self.__load_model(
                path="/data/projects/back_to_the_realm_v2/ckpt",
                id=Config.LOAD_MODEL_ID,
            )

        # Sync actor model with learner model initially; this also propagates
        # any weights just restored by __load_model into the actor.
        self._sync_models()
        self.reset()

    def _sync_models(self):
        """Synchronize actor model with learner model parameters."""
        self.model.load_state_dict(self.algorithm.model.state_dict())

    def update_win_rate(self, is_win):
        """Record one episode result and return the rolling win rate.

        Keeps at most the last 100 results; returns 0 until more than 10
        results have been collected (too few samples for a meaningful rate).
        """
        self.win_history.append(is_win)
        if len(self.win_history) > 100:
            self.win_history.pop(0)
        return sum(self.win_history) / len(self.win_history) if len(self.win_history) > 10 else 0

    def _predict(self, obs, legal_action):
        """Run a no-grad forward pass through the actor model.

        Returns a list of flattened numpy arrays, one per model output
        (action probabilities and value — see predict_process).
        """
        with torch.no_grad():
            inputs = self.model.format_data(obs, legal_action)
            output_list = self.model(*inputs)

        # .cpu() is a no-op for CPU tensors but keeps this safe if the model
        # is ever moved to an accelerator (.numpy() requires CPU tensors).
        return [output.cpu().numpy().flatten() for output in output_list]

    def predict_process(self, obs, legal_action):
        """Wrap a single observation into a batch of 1 and run inference.

        Returns:
            (probs, value) as flat numpy arrays.
        """
        obs = np.array([obs])
        legal_action = np.array([legal_action])
        probs, value = self._predict(obs, legal_action)
        return probs, value

    def observation_process(self, obs, extra_info=None):
        """Convert a raw environment observation into ObsData via the preprocessor."""
        feature, legal_action, reward = self.preprocessor.process([obs, extra_info], self.last_action)

        return ObsData(
            feature=feature,
            legal_action=legal_action,
            reward=reward,
        )

    @predict_wrapper
    def predict(self, list_obs_data):
        """Training-time prediction: sample an action from the policy distribution."""
        feature = list_obs_data[0].feature
        legal_action = list_obs_data[0].legal_action
        probs, value = self.predict_process(feature, legal_action)
        action, prob = random_choice(probs)
        return [ActData(probs=probs, value=value, action=action, prob=prob)]

    def action_process(self, act_data):
        """Extract the raw action from ActData."""
        return act_data.action

    @exploit_wrapper
    def exploit(self, observation):
        """Evaluation-time action selection from a raw platform observation."""
        obs_data = self.observation_process(observation["obs"], observation["extra_info"])
        feature = obs_data.feature
        legal_action = obs_data.legal_action
        probs, value = self.predict_process(feature, legal_action)
        action, prob = random_choice(probs)
        act = self.action_process(ActData(probs=probs, value=value, action=action, prob=prob))
        return act

    def reset(self):
        """Reset per-episode state (preprocessor history and last action)."""
        self.preprocessor.reset()
        self.last_prob = 0
        self.last_action = -1

    @learn_wrapper
    def learn(self, list_sample_data):
        """Train the learner model on a batch of samples and sync the actor."""
        self.algorithm.learn(list_sample_data)
        self._sync_models()

    @save_model_wrapper
    def save_model(self, path=None, id="1"):
        """Platform model saving - writes the learner's bare state_dict.

        NOTE: the file carries a ".pkl" suffix; __load_model accepts both
        ".pth" and ".pkl" so these files can be reloaded later.
        """
        model_file_path = f"{path}/model.ckpt-{str(id)}.pkl"

        try:
            os.makedirs(path, exist_ok=True)
            # Platform expects simple state dict format
            torch.save(self.algorithm.model.state_dict(), model_file_path)
            self.logger.info(f"[PLATFORM] save model {model_file_path} successfully")

        except Exception as e:
            self.logger.error(f"Failed to save platform model {model_file_path}: {str(e)}")

    @load_model_wrapper
    def load_model(self, path=None, id="1"):
        """Platform model loading wrapper."""
        self.__load_model(path, id)

    def __load_model(self, path=None, id="1"):
        """Load a checkpoint from <path>/model.ckpt-<id>.(pth|pkl).

        Two on-disk formats are supported:
          - a full training-checkpoint dict (keys like "model_state_dict",
            "training_step", "optimizer_state_dict", "var_beta") for
            hot-starting training;
          - a bare state_dict, as written by save_model.

        BUGFIX: model weights are now actually restored (the previous
        version only loaded optimizer/step metadata), and the ".pkl" files
        produced by save_model are found (the previous version only looked
        for ".pth" and always fell through to a cold start).
        """
        # Prefer the ".pth" training-checkpoint name, then fall back to the
        # ".pkl" name used by save_model.
        candidates = [
            f"{path}/model.ckpt-{str(id)}.pth",
            f"{path}/model.ckpt-{str(id)}.pkl",
        ]
        model_file_path = next((p for p in candidates if os.path.exists(p)), candidates[0])

        try:
            checkpoint = torch.load(model_file_path, map_location=self.device)

            training_keys = {"model_state_dict", "training_step", "optimizer_state_dict", "var_beta"}
            if isinstance(checkpoint, dict) and not (training_keys & checkpoint.keys()):
                # Bare state_dict (save_model format): restore weights only.
                self.algorithm.model.load_state_dict(checkpoint)
                self.logger.info(f"🔥 HOT START: Restored model weights from {model_file_path}")
            else:
                # Full training checkpoint: restore weights plus training state.
                if 'model_state_dict' in checkpoint:
                    self.algorithm.model.load_state_dict(checkpoint['model_state_dict'])
                    self.logger.info("🔥 HOT START: Restored model weights")

                # ✅ Load and LOG the training step
                if 'training_step' in checkpoint:
                    old_step = getattr(self.algorithm, 'training_step', 0)
                    self.algorithm.training_step = checkpoint['training_step']
                    self.logger.info(f"🔥 HOT START: Resumed from step {self.algorithm.training_step} (was {old_step})")

                # ✅ Load and LOG optimizer state
                if 'optimizer_state_dict' in checkpoint:
                    self.algorithm.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                    current_lr = self.algorithm.optimizer.param_groups[0]['lr']
                    self.logger.info(f"🔥 HOT START: Resumed with LR {current_lr}")

                # ✅ Load and LOG algorithm parameters
                if 'var_beta' in checkpoint:
                    self.algorithm.var_beta = checkpoint['var_beta']
                    self.logger.info(f"🔥 HOT START: Resumed with var_beta {self.algorithm.var_beta}")

            self.logger.info("✅ HOT START CONFIRMED - Training will continue from checkpoint")

        except FileNotFoundError:
            self.logger.info("❄️ COLD START - Training from scratch")
        except Exception as e:
            self.logger.error(f"Failed to load model {model_file_path}: {str(e)}")