import random
import sys
import numpy as np
from core.env import Env
from core.vtype import ContinuousNormal, NamedCategorical
sys.path.append("/data1/yxn/repast-city/")
from mpi4py import MPI
from myModel import Model

# Raw per-rider tuple emitted by the core model (kept for reference):
# self.pt.x, self.pt.y, self.walk_time, self.sleep_time, best_order.id, best_order.pickup_location[0], best_order.pickup_location[1], best_order.delivery_location[0], best_order.delivery_location[1], utility
# Macro state entries, in order: system utility, value entropy, productivity,
# average utility, average money, average rest time, average labor cost,
# average maximum number of orders obtained, most chosen area.
SYSTEM_UTILITY = 'system_utility'
ENTROPY = 'entropy'
PRODUCTIVITY = 'productivity'
AVE_UTILITY = 'ave_utility'
AVE_MONEY = 'ave_money'
AVE_REST_TIME = 'ave_rest_time'
AVE_LABOR_COST = 'ave_labor_cost'
AVE_ORDER_NUMBER = 'ave_order_number'
MOST_AERA = 'most_area'  # NOTE(review): identifier misspells "AREA"; the value is correct.
W1 = 'w1'  # w1/w2/w3: weight signals reported by the core model, used as rewards below
W2 = 'w2'
W3 = 'w3'

# The single categorical action: which region a rider chooses.
ACTION = 'choose_region'

# Map every state name to its "next step" variable name as produced by Env.
NEXT = {name: Env.name_next(name) for name in [SYSTEM_UTILITY, ENTROPY, PRODUCTIVITY, AVE_UTILITY, AVE_MONEY, AVE_REST_TIME, AVE_LABOR_COST, AVE_ORDER_NUMBER, MOST_AERA,W1,W2,W3]}

class MeituanMacro(Env):
    """Macro-level environment for the Meituan rider simulation.

    Wraps the repast-city ``Model`` behind the ``Env`` interface: the state
    is the 12-entry macro observation vector produced by the core model, the
    single action chooses one of four regions, and the rewards are the three
    weight signals ``w1``/``w2``/``w3`` reported by the model.
    """

    # Order of the entries in the observation vector returned by the core.
    _STATE_NAMES = (SYSTEM_UTILITY, ENTROPY, PRODUCTIVITY, AVE_UTILITY,
                    AVE_MONEY, AVE_REST_TIME, AVE_LABOR_COST,
                    AVE_ORDER_NUMBER, MOST_AERA, W1, W2, W3)

    @staticmethod
    def _model_params():
        """Build the positional argument list for ``Model(...)``.

        Centralised here because the identical 21-item configuration was
        previously copy-pasted into ``__init__``, ``define`` and ``launch``.
        """
        return [
            0,                                # run_time
            MPI.COMM_WORLD,                   # comm
            None,                             # websocket
            10,                               # rider_len
            [300] * 10,                       # sleep_time
            [2, 2, 2, 3, 4, 2, 2, 2, 3, 4],   # walk_time
            3,                                # intervention_strategy
            0.5,                              # percentage
            [0] * 10,                         # learning_types
            [1] * 10,                         # array_sex (optional)
            [10] * 10,                        # array_diligent (optional)
            None,                             # regulation_type (optional)
            'ER',                             # network_type (optional)
            0.8,                              # prob (optional)
            1,                                # reposition: 1 = with, 0 = without
            3,                                # BA_add_edges (optional)
            3,                                # WS_init_neighbors (optional)
            3,                                # RG_init_degree (optional)
            None,                             # order_prob (optional)
            3,                                # order_BF (optional)
            0,                                # npc_num (optional)
        ]

    def _build_core(self):
        """(Re)create the underlying simulation core from the shared params."""
        self.__core = Model(*self._model_params())

    def __init__(self, args):
        super().__init__(args)
        self._build_core()

    def define(self, args):
        """Declare the states, the action and the rewards of this environment."""
        _def = Env.Definition()
        # NOTE(review): the original code also rebuilt the core model inside
        # define(); kept for behavioural compatibility.
        self._build_core()
        # All 12 macro observations are modelled as unbounded continuous values.
        for state_name in self._STATE_NAMES:
            _def.state(state_name, ContinuousNormal(scale=None))
        _def.action(ACTION, NamedCategorical('region1', 'region2', 'region3', 'region4'))
        # Rewards are the raw next-step values of the three weight signals.
        _def.reward('w1', [NEXT[W1]], lambda x: x)
        _def.reward('w2', [NEXT[W2]], lambda x: x)
        _def.reward('w3', [NEXT[W3]], lambda x: x)
        return _def

    @classmethod
    def init_parser(cls, parser):
        parser.add_argument("--render", action="store_true", default=False,
                            help="render the environment in a window.")

    def launch(self):
        """Instantiate (or re-instantiate) the core simulation model."""
        self._build_core()

    def reset(self):
        super().reset()
        return self.__core.reset(return_type=1)

    def init_episode(self, *args, **kwargs):
        """Reset the core model and return the named initial state variables."""
        obs = self.__core.reset(1)
        return self._state_variables(obs)

    def transit(self, action):
        """Apply the chosen region action; return (next-state transition, info)."""
        a = action[ACTION]
        # NOTE(review): the meaning of the positional arguments of step() is
        # assumed from the original call site — confirm against the Model API.
        obs = self.__core.step(-1, int(a), 1)
        # Wrap the returned observation into "next step" named variables.
        s = self._state_variables(obs)
        tran = {self.name_next(k): s[k] for k in s.keys()}
        info = {}
        return tran, info

    def _state_variables(self, obs):
        """Map the 12-entry observation vector onto its named state variables."""
        return {name: obs[i] for i, name in enumerate(self._STATE_NAMES)}

    def terminated(self, transition) -> bool:
        # Episodes never terminate on their own; horizon is controlled outside.
        return False

    def random_action(self):
        # Four regions -> categorical indices 0..3.
        return {ACTION: random.randint(0, 3)}

    def __str__(self):
        return "Meituan_Macro"
