import json
import os
import random
from typing import Any

import requests
import gymnasium as gym
import numpy as np

from ray.tune import register_env
from ray import air, tune
from ray.rllib.core.columns import Columns
from ray.rllib.core.rl_module.rl_module import RLModuleSpec, RLModule
from ray.rllib.core.rl_module.multi_rl_module import MultiRLModuleSpec
from ray.rllib.core.rl_module.default_model_config import DefaultModelConfig
import ray
from ray.rllib.algorithms.ppo import PPOConfig, PPO
from ray.rllib.connectors.env_to_module import FlattenObservations, EnvToModulePipeline
from ray.rllib.connectors.module_to_env import ModuleToEnvPipeline
from ray.rllib.core import (
    COMPONENT_ENV_RUNNER,
    COMPONENT_ENV_TO_MODULE_CONNECTOR,
    COMPONENT_MODULE_TO_ENV_CONNECTOR,
    COMPONENT_LEARNER_GROUP,
    COMPONENT_LEARNER,
    COMPONENT_RL_MODULE,
    DEFAULT_MODULE_ID,
)
from ray.rllib.algorithms.ppo.torch.default_ppo_torch_rl_module import DefaultPPOTorchRLModule
from ray.rllib.algorithms.algorithm import Algorithm
from ray.rllib.env.multi_agent_env import make_multi_agent
from ray.rllib.env.single_agent_episode import SingleAgentEpisode
from ray.rllib.env.multi_agent_episode import MultiAgentEpisode
from ray.rllib.utils.metrics import (
    ENV_RUNNER_RESULTS,
    EPISODE_RETURN_MEAN,
    TRAINING_ITERATION_TIMER,
    LEARNER_GROUP,
    LEARNER_RESULTS,
    NUM_ENV_STEPS_SAMPLED_LIFETIME
)
from ray.rllib.utils.numpy import convert_to_numpy, softmax
import torch

# Number of consecutive historical draws exposed in each observation window.
OBS_N = 6
# Episode terminates successfully once this many rewarded "hits" accumulate.
TERMINATED_HIT_N = 6
# Hard cap on steps per episode before the episode is truncated.
TERMINATED_STEP_N = 20000
# Official China Welfare Lottery endpoint for SSQ draw history.
# NOTE: currently unused — the live fetch in reset() is commented out and the
# local 'ssq.json' dump is read instead.
URL = 'http://www.cwl.gov.cn/cwl_admin/front/cwlkj/search/kjxx/findDrawNotice?name=ssq&pageNo=1&pageSize=1000&systemType=PC'
# Browser User-Agent strings to rotate through when fetching URL (also unused
# while the live fetch is disabled).
USER_AGENTS = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1','Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0','Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; InfoPath.3)', ]
class LuckyNumberEnv(gym.Env):
    """Gym environment that replays historical SSQ ("double color ball") draws.

    The agent picks 6 red numbers (0-based, each in [0, 32]) and 1 blue number
    (in [0, 15]) per step.  The pick is scored against the draw immediately
    preceding the current OBS_N-draw observation window; a sufficiently good
    hit slides the window one draw forward (``history_index -= 1``).
    """

    def __init__(self):
        self.reset_num = 0  # how many times reset() has been called (logging only)
        # 6 red picks in [0, 32] followed by 1 blue pick in [0, 15].
        self.action_space = gym.spaces.MultiDiscrete([33, 33, 33, 33, 33, 33, 16])
        # OBS_N consecutive historical draws, keyed "0" .. str(OBS_N - 1).
        self.observation_space = gym.spaces.Dict(
            {
                f"{idx}": gym.spaces.Dict({
                    "red": gym.spaces.MultiDiscrete([33, 33, 33, 33, 33, 33], dtype=np.int16),
                    "blue": gym.spaces.Discrete(16),
                    "year": gym.spaces.Discrete(2030),
                    "num_of_year": gym.spaces.Discrete(1000),
                })
                for idx in range(OBS_N)
            }
        )

    def _load_history(self) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
        """Read the cached draw list from 'ssq.json' and normalize it.

        Returns ``(history, raw_log)`` where each ``history`` entry has 0-based
        numbers: ``red`` a sorted list of 6 ints in [0, 32], ``blue`` an int in
        [0, 15], plus ``year`` / ``num_of_year`` split out of the draw code.
        """
        # NOTE(review): the live fetch via `requests` (URL / USER_AGENTS) was
        # disabled in the original; the local JSON dump is the source of truth.
        with open('ssq.json', 'r', encoding='utf-8') as f:
            raw_log = json.load(f)['result']
        history = [
            {
                "year": int(entry["code"]) // 1000,
                "num_of_year": int(entry["code"]) % 1000,
                # Sort at parse time (the original sorted in place later via a
                # side-effecting dict-comprehension filter).
                "red": sorted(int(n) - 1 for n in entry["red"].split(",")),
                "blue": int(entry["blue"]) - 1,
            }
            for entry in raw_log
        ]
        return history, raw_log

    def _window_obs(self) -> dict[str, Any]:
        """Observation: the OBS_N draws starting at history_index (wrapping)."""
        n = len(self.history_luckys)
        return {
            f"{idx}": self.history_luckys[(idx + self.history_index) % n]
            for idx in range(OBS_N)
        }

    def reset(self, *, seed: int | None = None, options: dict[str, Any] | None = None) -> tuple[Any, dict[str, Any]]:
        """Reload the draw history and position the observation window.

        ``options["history_index"]`` (default ``OBS_N * 2``) selects the start
        of the window; the target draw for the first step is
        ``history_index - 1``.
        """
        super().reset(seed=seed, options=options)
        options = options or {}
        self.reset_num += 1
        self.hit_num = 0     # rewarded (>= 0.3) steps this episode
        self.reward_sum = 0  # sum of pre-penalty rewards this episode
        self.curr_step = 0
        self.history_luckys, raw_log = self._load_history()
        # Exact-match lookup: "<sorted red list><blue>" -> draw dict.
        self.action_to_history = {
            str(draw["red"]) + str(draw["blue"]): draw for draw in self.history_luckys
        }
        self.history_index = options.get("history_index", OBS_N * 2)
        print("reset", {"reset_num": self.reset_num, "history_index": self.history_index}, raw_log[0]["date"])
        return self._window_obs(), {"history_index": self.history_index, "hit_num": self.hit_num, "reward_sum": self.reward_sum}

    def step(self, action) -> tuple[Any, float, bool, bool, dict[str, Any]]:
        """Score ``action`` (6 red + 1 blue picks) against the target draw."""
        self.curr_step += 1
        # Duplicate red picks collapse, so fewer than 6 reds is possible.
        red = sorted(set(action[0:6].tolist()))
        blue = action[6]
        key = str([int(x) for x in red]) + str(blue)
        history = self.action_to_history.get(key)
        # The draw immediately preceding the observation window is the target.
        perfect_history = self.history_luckys[(self.history_index - 1) % len(self.history_luckys)]
        red_hit = sum(1 for pick in red if pick in perfect_history["red"])
        reward = 0
        if history is not None and history == perfect_history:  # perfect hit: all reds + blue
            reward += 1
        elif red_hit >= 3 and perfect_history["blue"] == blue:  # partial hit: >= 3 reds + blue
            reward += (0.1 * (red_hit - 2))
            reward += 0.2
        # NOTE(review): reward_sum accumulates the pre-penalty reward; the
        # -0.01 miss penalty below is deliberately excluded from it — confirm.
        self.reward_sum += reward
        if reward >= 0.3:
            # Good enough hit: advance the window one draw.
            self.hit_num += 1
            self.history_index -= 1
        else:
            reward -= 0.01  # small penalty to discourage aimless guessing
        terminated = self.hit_num >= TERMINATED_HIT_N and self.reward_sum >= self.hit_num * 0.3 + 0.2
        truncated = not terminated and (self.history_index == 0 or self.curr_step >= TERMINATED_STEP_N)
        info = {"history_index": self.history_index, "hit_num": self.hit_num, "reward_sum": self.reward_sum, "red_hit": red_hit}
        if reward > 0:
            print("step", self.curr_step, info)
        return self._window_obs(), reward, terminated, truncated, info

if __name__ == '__main__':
    # Register the custom environment with RLlib under the name "LuckyNumber".
    register_env(
        "LuckyNumber",
        lambda _: LuckyNumberEnv(),
    )

    config = (
        PPOConfig()
        .environment("LuckyNumber")
        # Opt out of the new RLlib API stack (EnvRunner / RLModule / Learner);
        # training runs on the legacy stack.
        .api_stack(
            enable_env_runner_and_connector_v2=False,
            enable_rl_module_and_learner=False,
        )
        .env_runners(
            num_env_runners=2,
            num_cpus_per_env_runner=3,
            num_gpus_per_env_runner=0.5,
            #batch_mode="complete_episodes",
            #rollout_fragment_length="auto",
            # Generous sampling timeout: episodes may run up to
            # TERMINATED_STEP_N steps before truncation.
            sample_timeout_s=180,
            create_env_on_local_worker=True,
        )
        .framework("torch")
        .training(
            lr=0.001,
            gamma=0.99,
            train_batch_size=TERMINATED_STEP_N // 2,
        )
        .debugging(log_level="DEBUG")
    )

    # Mean episode return under the env-runner results.
    # NOTE(review): with the legacy API stack enabled above, results may be
    # reported under a different key (e.g. "episode_reward_mean") depending on
    # the Ray version — confirm this key exists in the result dict.
    metric_key = f"{ENV_RUNNER_RESULTS}/{EPISODE_RETURN_MEAN}"
    # Start training.
    tuner = tune.Tuner(
        "PPO",
        param_space=config,
        run_config=air.RunConfig(
            stop={
                # Intended: stop after N training iterations.
                # NOTE(review): TRAINING_ITERATION_TIMER is the timer-metric
                # constant — verify it resolves to "training_iteration" in this
                # Ray version, not to a seconds-per-iteration timer key.
                TRAINING_ITERATION_TIMER: 10,
                #NUM_ENV_STEPS_SAMPLED_LIFETIME: TERMINATED_STEP_N,
                # ...or stop early once mean return reaches the hit target.
                metric_key: TERMINATED_HIT_N,
            },
            checkpoint_config=air.CheckpointConfig(
                checkpoint_at_end=True
            ),
        ),
    )

    results = tuner.fit()
    print("results", results)
    # Get the last checkpoint from the above training run.
    best_result = results.get_best_result(metric=metric_key, mode="max")
    print("Best result:", best_result)
  
