import json
from typing import Any

import requests
import gymnasium as gym
import numpy as np

from ray.tune import register_env
from ray import air, tune
from ray.rllib.core.columns import Columns
from ray.rllib.core.rl_module.rl_module import RLModuleSpec, RLModule
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
import ray
from ray.rllib.algorithms.ppo import PPOConfig, PPO
from ray.rllib.algorithms.dqn import DQNConfig
from ray.rllib.connectors.env_to_module import FlattenObservations, EnvToModulePipeline
from ray.rllib.connectors.module_to_env import ModuleToEnvPipeline
from ray.rllib.core import (
    COMPONENT_ENV_RUNNER,
    COMPONENT_ENV_TO_MODULE_CONNECTOR,
    COMPONENT_MODULE_TO_ENV_CONNECTOR,
    COMPONENT_LEARNER_GROUP,
    COMPONENT_LEARNER,
    COMPONENT_RL_MODULE,
    DEFAULT_MODULE_ID,
)
from ray.rllib.algorithms.ppo.torch.default_ppo_torch_rl_module import DefaultPPOTorchRLModule
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.env.multi_agent_env import make_multi_agent
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
from ray.rllib.utils.metrics import (
    ENV_RUNNER_RESULTS,
    EPISODE_RETURN_MEAN,
    TRAINING_ITERATION_TIMER,
    LEARNER_GROUP,
    LEARNER_RESULTS,
    NUM_ENV_STEPS_SAMPLED_LIFETIME
)
from ray.rllib.utils.numpy import convert_to_numpy, softmax
import torch

# Episode length cap: 1 << 10 = 1024 steps. An episode truncates once this many
# wrong guesses accumulate (see SimpleMathEnv.step); the same value is reused
# below as the rollout fragment length and the PPO train batch size.
TERMINATED_STEP_N = 1 << 10
class SimpleMathEnv(gym.Env):
    """Toy addition environment.

    The observation is a dict with two addends ``{"a": int, "b": int}``
    (each 0..5); the action is the guessed sum (Discrete(11), i.e. 0..10).
    A correct guess yields reward 1 and terminates the episode; otherwise the
    episode truncates after TERMINATED_STEP_N steps.
    """

    def __init__(self):
        super().__init__()
        self.reset_num = 0  # kept for backward compatibility; reset() zeroes it
        # Action: the guessed sum, 0..10.
        self.action_space = gym.spaces.Discrete(11)
        # Observation: the two addends, each in 0..5.
        self.observation_space = gym.spaces.Dict(
            {
                "a": gym.spaces.Discrete(6),
                "b": gym.spaces.Discrete(6),
            }
        )
        # Expression -> sum lookup table. Some pairs are deliberately left
        # commented out so those exact expressions never appear in training.
        self.pools = {
            "0+0": 0,
            "1+0": 1,
            "1+1": 2,
            "2+0": 2,
            "2+1": 3,
            "2+2": 4,
            "3+0": 3,
            "3+1": 4,
            "3+2": 5,
            "3+3": 6,
            "4+0": 4,
            "4+1": 5,
            "4+2": 6,
            "4+3": 7,
            "4+4": 8,
            "5+0": 5,
            "5+1": 6,
            # "5+2":7,
            "5+3": 8,
            # "5+4":9,
            "5+5": 10,
        }
        # Inverse mapping: str(sum) -> list of expressions producing that sum.
        self.val_to_keys: dict[str, list[str]] = {}
        for expr, total in self.pools.items():
            self.val_to_keys.setdefault(str(total), []).append(expr)

    def _sample_obs(self) -> dict[str, int]:
        """Sample a random expression for the current target and return its
        two addends (shuffled) as the observation dict."""
        expr = self.np_random.choice(self.val_to_keys[str(self.target)])
        addends = [int(x) for x in expr.split("+")]
        self.np_random.shuffle(addends)
        return {"a": addends[0], "b": addends[1]}

    def reset(self, *, seed: int | None = None, options: dict[str, Any] | None = None) -> tuple[Any, dict[str, Any]]:
        """Pick a new target sum and return an observation for it.

        Returns (obs, info) where info carries the hidden target.
        """
        super().reset(seed=seed, options=options)
        self.reset_num = 0
        self.curr_step = 0
        # BUG FIX: sample the target from the seeded self.np_random (set up by
        # super().reset) instead of the global np.random, so that seeding the
        # environment actually makes episodes reproducible.
        self.target = int(self.np_random.choice([int(x) for x in self.val_to_keys]))
        info = {"target": self.target}
        return self._sample_obs(), info

    def step(self, action) -> tuple[Any, float, bool, bool, dict[str, Any]]:
        """Score the guessed sum and emit a fresh observation for the same target."""
        self.curr_step += 1
        # Reward only the exact correct sum; success ends the episode.
        reward = 1 if self.target == action else 0
        terminated = reward > 0
        # Give up after TERMINATED_STEP_N wrong guesses.
        truncated = not terminated and self.curr_step >= TERMINATED_STEP_N
        info = {"target": self.target}
        return self._sample_obs(), reward, terminated, truncated, info

import argparse

# CLI options for the post-training inference demo (consumed under __main__).
parser = argparse.ArgumentParser()
parser.add_argument(
    "--turn",
    type=int,
    default=2,
    help="Number of inference rounds to run after training.",
)
parser.add_argument(
    "--turn-infer",
    type=int,
    default=10,
    # Fixes the typo "trun" in the original help text.
    help="Number of inference steps per round.",
)
if __name__ == '__main__':
    args = parser.parse_args()

    # Register the toy env under a name RLlib/Tune can look up by string.
    register_env(
        "SimpleMathEnv",
        lambda _: SimpleMathEnv(),
    )

    config = (
        PPOConfig()
        .environment("SimpleMathEnv")
        # Stay on the old API stack: algo.compute_single_action() below
        # relies on it.
        .api_stack(
            enable_env_runner_and_connector_v2=False,
            enable_rl_module_and_learner=False,
        )
        .env_runners(
            num_env_runners=1,
            num_cpus_per_env_runner=3,
            num_gpus_per_env_runner=0,
            rollout_fragment_length=TERMINATED_STEP_N,
            sample_timeout_s=3600,
            create_env_on_local_worker=True,
        )
        .framework("torch")
        .training(
            lr=0.001,
            gamma=0.99,
            train_batch_size=TERMINATED_STEP_N,
        )
        .debugging(log_level="DEBUG")
    )

    metric_key = f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}"
    # Launch training.
    tuner = tune.Tuner(
        "PPO",
        param_space=config,
        run_config=air.RunConfig(
            stop={
                # BUG FIX: stop after 20 training iterations, as the original
                # comment intended. The previous key, TRAINING_ITERATION_TIMER,
                # is a per-iteration timer metric, so "20" would have been
                # interpreted as seconds, not iterations.
                "training_iteration": 20,
            },
            checkpoint_config=air.CheckpointConfig(
                checkpoint_at_end=True
            ),
        ),
    )

    results = tuner.fit()
    print("results", results)
    # Pick the trial with the best mean episode return.
    best_result = results.get_best_result(metric=metric_key, mode="max")
    print("Best result:", best_result)

    if best_result is not None:
        # Restore the trained algorithm from its final checkpoint and run a
        # small interactive evaluation against a fresh environment instance.
        algo = Algorithm.from_checkpoint(best_result.checkpoint.path)
        env = SimpleMathEnv()
        for n in range(args.turn):
            print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", n)
            for i in range(args.turn_infer):
                reset_obs, info = env.reset()
                # Query the trained policy for its guess at the sum.
                action = algo.compute_single_action(reset_obs)
                print(f"infer[{i}]", action, reset_obs)
        # Spot-check two fixed observations (expected sums: 9 and 7).
        print(algo.compute_single_action({"a": 4, "b": 5}))
        print(algo.compute_single_action({"a": 2, "b": 5}))
