import random
import sys
import numpy as np
from core.env import Env
from core.vtype import ContinuousNormal, NamedCategorical
sys.path.append("/data1/yxn/repast-city/")
from mpi4py import MPI
from myModel import Model

# Observation vector layout produced by the core model (in index order):
# pt.x, pt.y, walk_time, sleep_time, best_order.id,
# pickup x, pickup y, delivery x, delivery y, utility
# Names of the state variables exposed by the environment.
POSITION_X = 'position_x'
POSITION_Y = 'position_y'
WORK_TIME = 'work_time'
SLEEP_TIME = 'sleep_time'
ORDER_ID = 'order_id'
ORDER_PICKUP_X = 'order_pickup_x'
ORDER_PICKUP_Y = 'order_pickup_y'
ORDER_DELIVERY_X = 'order_delivery_x'
ORDER_DELIVERY_Y = 'order_delivery_y'
ORDER_MONEY = 'order_money'
PERFORMANCE = 'performance'
W1 = 'w1'
W2 = 'w2'
W3 = 'w3'

# The single action variable: accept or reject the proposed order.
ACTION = 'deal_order'

# Next-step counterpart name for every state variable above.
NEXT = {
    name: Env.name_next(name)
    for name in (
        POSITION_X, POSITION_Y, WORK_TIME, SLEEP_TIME, ORDER_ID,
        ORDER_PICKUP_X, ORDER_PICKUP_Y, ORDER_DELIVERY_X,
        ORDER_DELIVERY_Y, ORDER_MONEY, PERFORMANCE, W1, W2, W3,
    )
}

class MeituanMicro(Env):
    """Env wrapper around the repast-city Meituan rider ``Model``.

    Maps the flat observation vector produced by the core simulator onto
    named state variables, exposes a binary accept/reject order action,
    and defines rewards on the three utility weights (w1/w2/w3) and the
    order money.
    """

    # Names of all state variables, in the exact order they appear in the
    # observation vector returned by ``Model.reset`` / ``Model.step``.
    _STATE_NAMES = (
        POSITION_X, POSITION_Y, WORK_TIME, SLEEP_TIME, ORDER_ID,
        ORDER_PICKUP_X, ORDER_PICKUP_Y, ORDER_DELIVERY_X,
        ORDER_DELIVERY_Y, ORDER_MONEY, PERFORMANCE, W1, W2, W3,
    )

    def __init__(self, args):
        super().__init__(args)
        # Build the underlying simulator; ``define`` and ``launch``
        # re-create it so each phase starts from a fresh model.
        self.__core = self.__build_core()

    @staticmethod
    def __build_core():
        """Construct a fresh repast-city ``Model`` instance.

        The positional parameter list was previously duplicated verbatim
        in ``__init__``, ``define`` and ``launch``; keeping it in one place
        prevents the three call sites from drifting apart.
        NOTE(review): parameter meanings taken from the original inline
        labels — confirm against the ``Model`` signature in myModel.py.
        """
        params = [
            0,  # run_time
            MPI.COMM_WORLD,  # comm
            None,  # websocket
            10,  # rider_len
            [300, 300, 300, 300, 300, 300, 300, 300, 300, 300],  # sleep_time
            [2, 2, 2, 3, 4, 2, 2, 2, 3, 4],  # walk_time
            3,  # intervention_strategy
            0.5,  # percentage
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],  # learning_types
            [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # array_sex (optional)
            [10, 10, 10, 10, 10, 10, 10, 10, 10, 10],  # array_diligent (optional)
            None,  # regulation_type (optional)
            'ER',  # network_type (optional)
            0.8,  # prob (optional)
            1,  # reposition flag: 1 = with reposition, 0 = without
            3,  # BA_add_edges (optional)
            3,  # WS_init_neighbors (optional)
            3,  # RG_init_degree (optional)
            None,  # order_prob (optional)
            3,  # order_BF (optional)
            0,  # npc_num (optional)
        ]
        return Model(*params)

    def define(self, args):
        """Declare the state variables, the action and the rewards.

        Also re-creates the core model, matching the behavior of the
        original implementation.
        """
        _def = Env.Definition()
        self.__core = self.__build_core()

        # Every state variable is modelled as a continuous normal variable.
        for name in self._STATE_NAMES:
            _def.state(name, ContinuousNormal(scale=None))
        _def.action(ACTION, NamedCategorical('accept_order', 'reject_order'))
        # Rewards: the three utility weights plus the order money, each
        # read directly from its next-step state variable.
        _def.reward('w1', [NEXT[W1]], lambda x: x)
        _def.reward('w2', [NEXT[W2]], lambda x: x)
        _def.reward('w3', [NEXT[W3]], lambda x: x)
        _def.reward('order money', [NEXT[ORDER_MONEY]], lambda x: x)

        return _def

    @classmethod
    def init_parser(cls, parser):
        """Register environment-specific command-line flags."""
        parser.add_argument("--render", action="store_true", default=False,
                            help="render the environment in a window.")

    def launch(self):
        """Create (or re-create) the underlying Model instance."""
        self.__core = self.__build_core()

    def reset(self):
        super().reset()
        return self.__core.reset(0)

    def init_episode(self, *args, **kwargs):
        """Reset the core model and return the named initial state."""
        obs = self.__core.reset(0)
        return self._state_variables(obs)

    def transit(self, action):
        """Apply the accept/reject decision and step the simulator.

        Returns a ``(transition, info)`` pair where the transition maps
        next-step variable names to the new observation values.
        """
        a = action[ACTION]
        # step(action, action_region, return_info_type, control_id, reposition_type)
        # reposition_type=1 enables re-dispatch (0 would disable it).
        obs = self.__core.step(action=a, action_region=-1, return_info_type=0,
                               control_id=0, reposition_type=1)
        s = self._state_variables(obs)
        tran = {self.name_next(k): v for k, v in s.items()}
        info = {}
        return tran, info

    def _state_variables(self, obs):
        """Map the flat observation vector onto named state variables.

        Index meanings (0..13): position x/y, rider work-start time, rest
        time, assigned order id, pickup x/y, delivery x/y (the original
        comment mislabelled delivery x as "pickup y"), order money, rider
        performance, and the three weights w1/w2/w3.
        """
        return dict(zip(self._STATE_NAMES, obs[:14]))

    def terminated(self, transition) -> bool:
        # Episodes never terminate from the environment side; a different
        # termination condition can be plugged in here if needed.
        return False

    def random_action(self):
        """Sample a uniformly random accept(1)/reject(0) decision."""
        return {ACTION: random.randint(0, 1)}

    def __str__(self):
        return "Meituan"
# smoke test (the class is named MeituanMicro and requires an args object)
# env = MeituanMicro(args)
# obs = env.reset()
# print("Initial Observation:")
# print(obs)

# actions = [env.action_space.sample() for _ in range(env.num_riders)]
# obs, rewards, done, info = env.transit(actions)
# print("Next Observation:")
# print(obs)
# print("Rewards:")
# print(rewards)
