import numpy as np
import pandas as pd
import gym
from gym.spaces import MultiDiscrete
from learning_to_adapt.FJSP_py import FlexibleJobShopSchedulingProblem
from learning_to_adapt.envs.base import Env
from learning_to_adapt.utils.serializable import Serializable
from Env import EnvFJSP


class GymEnvFJSP(Env, EnvFJSP, gym.Env, Serializable):
    """Gym-compatible wrapper around the FJSP environment.

    Bridges ``EnvFJSP``'s two-part ``(action, machine_id)`` action interface
    to a flat ``gym.spaces.MultiDiscrete`` action space, so the environment
    can be driven by standard Gym agents (see the ``__main__`` rollout loop).
    """

    def __init__(self, problem, *args, **kwargs):
        """Initialize from a FlexibleJobShopSchedulingProblem instance.

        Extra ``*args``/``**kwargs`` are forwarded to ``EnvFJSP.__init__``
        (e.g. ``allow_operation_to_left_shift`` — see the ``__main__`` usage).
        """
        Serializable.quick_init(self, locals())
        # BUG FIX: the original unbound call `EnvFJSP.__init__(problem, ...)`
        # omitted `self`, so `problem` was (wrongly) bound as the instance.
        EnvFJSP.__init__(self, problem, *args, **kwargs)
        self._set_action_space()

    def step(self, action, *args, **kwargs):
        """Translate a Gym action and delegate to the underlying FJSP env.

        Returns whatever ``EnvFJSP.step`` returns — presumably the classic
        Gym 4-tuple ``(observation, reward, done, info)``, as unpacked by the
        ``__main__`` loop below (TODO confirm against EnvFJSP.step).
        """
        # BUG FIX: the original called the nonexistent
        # `self.action_trans_down` (the method is `_action_trans_down`) and
        # then recursed into `GymEnvFJSP.step` without `self`, which would
        # loop forever instead of advancing the schedule. Delegate to the
        # parent implementation and propagate its return value.
        action, machine_id = self._action_trans_down(action)
        return EnvFJSP.step(self, action, machine_id, *args, **kwargs)

    def _set_action_space(self):
        """Build the MultiDiscrete action space from ``self.problem``.

        ``problem.machines_operation`` is assumed to give the number of
        discrete choices per dimension — TODO confirm against the
        FlexibleJobShopSchedulingProblem definition.
        """
        self._action_space = MultiDiscrete(self.problem.machines_operation)

    def _action_trans_down(self, action):
        """gym.Space -> FJSP space: split a flat Gym action into
        ``(action, machine_id)``.

        Not yet implemented; raising explicitly beats the original stub's
        silent ``None`` return, which surfaced later as a confusing
        unpacking TypeError inside ``step``.
        """
        raise NotImplementedError("_action_trans_down is not implemented yet")

    def _action_trans_up(self, action, machine_id):
        """FJSP space -> gym.Space: merge ``(action, machine_id)`` into a
        flat Gym action.

        BUG FIX: the original signature was missing ``self``, so any
        instance call would have bound the instance to ``action``.
        """
        raise NotImplementedError("_action_trans_up is not implemented yet")

    @property
    def observation_space(self):
        """The Gym observation space (set by the underlying environment)."""
        return self._observation_space

    @property
    def action_space(self):
        """The MultiDiscrete action space built in ``_set_action_space``."""
        return self._action_space

if __name__ == "__main__":

    import time

    # Smoke-test rollout: load a benchmark instance and drive the env
    # with random actions, resetting whenever an episode finishes.
    problem = FlexibleJobShopSchedulingProblem.load(
        "/home/wlc/deep-learning/dataset/Benchmark/MK_Case/Mk01.fjs"
    )
    env = GymEnvFJSP(problem=problem, allow_operation_to_left_shift=True)

    while True:
        env.reset()
        steps_taken = 0
        while steps_taken < 1000:
            sampled = env.action_space.sample()
            observation, reward, done, info = env.step(sampled)
            env.render()
            print(observation, reward, done, info)
            if done:
                time.sleep(1)
                env.reset()
            steps_taken += 1
