import sys
import gym
from gym import spaces
import numpy as np
import random
sys.path.append("/data1/yxn/repast-city/")
# 博弈机制：
# 1、心情：骑手需要平衡工作时长和心情，以确保能够持续工作和接单。
# 骑手休息时，心情值增加。
# 骑手接单或送单时，心情值降低。
# 骑手的绩效提升时，心情值增加；绩效下降时，心情值降低。

# 2、订单分配：每个骑手独立地选择订单，订单是有限资源，骑手之间存在竞争。
# 订单状态：一个订单只能被一个骑手接单和送达，因此订单状态在骑手之间共享并影响他们的决策。
# 地理位置：骑手的地理位置会影响他们选择订单的优先级，位置相近的骑手可能竞争同一个订单。

# 3.绩效影响：高绩效的骑手更容易接到高金额订单，这增加了他们的绩效，进一步加剧了骑手之间的竞争。
# 低绩效骑手需要通过增加工作时长和提高接单频率来提升绩效，同时也需要平衡心情值和休息时间
# 具体：定义一个绩效阈值，低于此阈值的骑手会减少休息频率，增加工作时间

# step:0
# 动作包含以下内容：
# 1.接单 (action = 0)
# 2.拒单 (action = 1)

# 观察值包含以下内容：
# 骑手的信息：包括位置（x, y）、骑手状态（空闲、送餐中、休息）、骑手的订单id、骑手的订单状态、心情值、休息时长、绩效
# 订单的信息：包括位置（x, y）、金额、状态（未接单、已接单、已送达）。
import gym
from gym import spaces
import numpy as np
import random

class MeituanEnv(gym.Env):
    """Food-delivery rider environment.

    Several riders compete for a shared pool of orders while balancing
    work time, rest, mood and performance.  Rider 0 is the learning
    agent controlled by ``step(action)``; the remaining riders sample
    random actions.

    Actions (Discrete(2)):
        0 -- accept an order
        1 -- reject an order

    Observation (Box, shape (8,), each dim in [0, 100]):
        rider-0 position (x, y), mood, base rest time, performance,
        amount of rider-0's tracked order, that order's position (x, y).
    """

    def __init__(self):
        super(MeituanEnv, self).__init__()

        self.num_merchants = 10
        self.num_riders = 5
        self.num_orders = 20

        # Action space: 0 = accept, 1 = reject.
        self.action_space = spaces.Discrete(2)
        # Every observation dimension lies in [0, 100].
        low = np.zeros(8)
        high = np.ones(8) * 100
        self.observation_space = spaces.Box(low=low, high=high, shape=(8,), dtype=np.float32)

        # --- state ---
        self.performance_threshold = 0  # riders below this threshold act aggressively
        self.merchants = np.random.rand(self.num_merchants, 2) * 100  # merchant positions
        self.riders = np.random.rand(self.num_riders, 2) * 100  # rider positions
        self.orders = self.generate_orders()
        # Column 0: 0 = idle, 1 = delivering, 2 = resting; column 1: order id (-1 = none).
        self.rider_states = np.zeros((self.num_riders, 2), dtype=int)
        self.rider_states[:, 1] = -1
        self.rider_types = [random.choice(['rule_based', 'imitation_learning']) for _ in range(self.num_riders)]
        self.rider_performance = np.zeros(self.num_riders)  # per-rider performance score
        self.vision = 10  # rider vision range (currently unused by the logic below)
        self.rider_mood = np.ones(self.num_riders) * 50  # mood value in [0, 100]
        self.rider_work_time = np.zeros(self.num_riders)  # current continuous work time
        self.rider_rest_time = np.zeros(self.num_riders)  # current continuous rest time
        self.base_rest_time = np.zeros(self.num_riders)  # baseline rest duration per rider
        self.max_work_time = 100  # work time after which rest is forced
        self.previous_performance = np.zeros(self.num_riders)  # for mood deltas
        self.order_idx = -1  # rider 0's tracked order index (-1 = none)

    def reset(self):
        """Re-randomize the world and return the initial observation."""
        self.merchants = np.random.rand(self.num_merchants, 2) * 100
        self.riders = np.random.rand(self.num_riders, 2) * 100
        self.orders = self.generate_orders()
        self.rider_states = np.zeros((self.num_riders, 2), dtype=int)
        self.rider_states[:, 1] = -1
        self.rider_types = [random.choice(['rule_based', 'imitation_learning']) for _ in range(self.num_riders)]
        self.rider_performance = np.zeros(self.num_riders)
        self.rider_mood = np.ones(self.num_riders) * 50  # max is 100
        self.rider_work_time = np.zeros(self.num_riders)
        self.rider_rest_time = np.zeros(self.num_riders)
        self.base_rest_time = np.zeros(self.num_riders)
        self.previous_performance = np.zeros(self.num_riders)
        # BUGFIX: previously the tracked order index leaked across episodes.
        self.order_idx = -1
        return self._get_obs()

    def generate_orders(self):
        """Create a fresh batch of ``num_orders`` random orders."""
        return {
            'locations': np.random.rand(self.num_orders, 2) * 100,
            'merchants': np.random.choice(self.num_merchants, self.num_orders),
            'amounts': np.random.randint(5, 100, self.num_orders),  # order payout
            'statuses': np.zeros(self.num_orders)  # 0: open, 1: accepted, 2: delivered
        }

    def step(self, action):
        """Advance the simulation one tick.

        ``action`` drives rider 0; the other riders sample random actions,
        all filtered through the action mask.  Returns
        ``(obs, rewards, done, info)`` where ``rewards`` is per-rider.
        """
        assert self.riders is not None, "Call reset before using step method."
        actions = np.empty(self.num_riders, dtype=int)
        actions[0] = action
        for i in range(1, self.num_riders):
            actions[i] = self.action_space.sample()
        actions = self.apply_action_mask(actions)  # mask out invalid actions

        # With 50% probability each tick, replace delivered orders with new ones.
        if np.random.rand() < 0.5:
            for idx in range(len(self.orders['statuses'])):
                if self.orders['statuses'][idx] == 2:  # delivered
                    new_order = self.generate_orders()
                    self.orders['locations'][idx] = new_order['locations'][0]
                    self.orders['merchants'][idx] = new_order['merchants'][0]
                    self.orders['amounts'][idx] = new_order['amounts'][0]
                    self.orders['statuses'][idx] = new_order['statuses'][0]

        rewards = np.zeros(self.num_riders)
        # Low-performance threshold = 2/3 of the current mean performance.
        average_performance = np.mean(self.rider_performance)
        self.performance_threshold = 2 / 3 * average_performance

        for i in range(self.num_riders):
            is_low_performance = self.rider_performance[i] < self.performance_threshold

            if self.rider_states[i][0] == 2:  # resting
                self.rider_rest_time[i] += 1
                # BUGFIX: pass the rider's status (2 = resting) so mood rises
                # during rest; previously the raw action (0/1) was passed and
                # resting riders lost mood instead.
                self.update_mood(i, self.rider_states[i][0])
                if self.rider_rest_time[i] >= self.calculate_rest_time(i, is_low_performance):
                    self.rider_states[i][0] = 0  # rest over, back to work
                    self.rider_rest_time[i] = 0
                continue  # skip everything else while resting

            speed_modifier = self.rider_mood[i] / 50.0  # mood scales movement speed

            if actions[i] == 0:  # accept
                if self.rider_states[i][0] == 0 and self.willing_to_accept_order(i):
                    order_idx = self.select_order(i)
                    if i == 0:
                        # Track the learning agent's order for _get_obs.
                        self.order_idx = order_idx if order_idx is not None else -1
                    if order_idx is not None:
                        self.rider_states[i] = [1, order_idx]
                        self.orders['statuses'][order_idx] = 1  # accepted
                        self._advance_delivery(i, order_idx, speed_modifier, rewards)
            elif actions[i] == 1:  # reject
                if self.rider_states[i][0] == 0:  # idle rider rejects
                    self.decrease_performance_for_rejection(i)
                    # Random walk while idle.
                    self.riders[i] += np.random.randn(2) * 2 * speed_modifier
                    self.rider_states[i] = [0, -1]
                    if i == 0:
                        # BUGFIX: only rider 0 may clear the tracked order id;
                        # previously every idle rejection clobbered it.
                        self.order_idx = -1
                if self.rider_states[i][0] == 1:
                    # While delivering, rejection is overridden: keep delivering.
                    order_idx = int(self.rider_states[i][1])
                    self._advance_delivery(i, order_idx, speed_modifier, rewards)

            # NOTE(review): normalization deliberately stays inside the per-rider
            # loop as in the original design; hoisting it out would change the
            # mood dynamics seen by later riders in the same tick.
            self.normalize_performance()
            # Update mood and work time from the rider's (possibly new) status.
            self.update_mood(i, self.rider_states[i][0])
            if self.rider_states[i][0] != 2:  # working counts toward work time
                self.rider_work_time[i] += 1
                if self.rider_work_time[i] >= self.max_work_time:
                    self.rider_states[i][0] = 2  # force rest after max work time
                    self.rider_work_time[i] = 0

        done = False
        return self._get_obs(), rewards, done, {}

    def _advance_delivery(self, rider_idx, order_idx, speed_modifier, rewards):
        """Move a rider toward its order; on arrival, pay the order amount as
        reward, update performance, free the rider and mark the order delivered.

        Factored out of ``step`` where it was duplicated for the accept and
        forced-delivery branches.  Mutates ``rewards`` in place.
        """
        target = self.orders['locations'][order_idx]
        direction = target - self.riders[rider_idx]
        distance = np.linalg.norm(direction)
        if distance > 0:
            # BUGFIX: guard against division by zero when already on target.
            self.riders[rider_idx] += direction / distance * 2 * speed_modifier
        if np.linalg.norm(self.riders[rider_idx] - target) < 1:
            rewards[rider_idx] = self.orders['amounts'][order_idx]  # payout on delivery
            self.update_performance(order_idx, rider_idx)
            self.rider_states[rider_idx] = [0, -1]  # back to idle
            self.orders['statuses'][order_idx] = 2  # delivered

    def select_order(self, rider_idx):
        """Pick an open order for ``rider_idx`` according to its type.

        'rule_based' riders take the first order whose payout beats half its
        distance; 'imitation_learning' riders maximize a performance/distance
        factor.  Falls back to a random open order; returns ``None`` when no
        order is open.
        """
        rider_pos = self.riders[rider_idx]
        rider_type = self.rider_types[rider_idx]
        best_order = None
        best_value = -np.inf

        for idx in range(len(self.orders['locations'])):
            if self.orders['statuses'][idx] != 0:  # skip accepted/delivered orders
                continue
            order_pos = self.orders['locations'][idx]
            order_amount = self.orders['amounts'][idx]
            distance = np.linalg.norm(rider_pos - order_pos)

            # Higher performance and shorter distance raise priority.
            performance_factor = self.rider_performance[rider_idx] / (1 + distance)

            if rider_type == 'rule_based':
                if order_amount > distance * 0.5:
                    return idx
            elif rider_type == 'imitation_learning':
                if performance_factor > best_value:
                    best_value = performance_factor
                    best_order = idx

        if best_order is None:
            available_orders = [idx for idx, status in enumerate(self.orders['statuses']) if status == 0]
            if not available_orders:
                # BUGFIX: previously np.random.choice([]) raised ValueError.
                return None
            best_order = np.random.choice(available_orders)
        return best_order

    def willing_to_accept_order(self, rider_idx):
        """A rider accepts orders only when mood exceeds 30."""
        mood = self.rider_mood[rider_idx]
        return mood > 30

    def update_mood(self, rider_idx, rider_status):
        """Adjust mood from the rider's status and performance change.

        Resting (+5, capped at 100); working (-1, floored at 0); then a
        bonus/penalty of one tenth of the performance delta since the last
        call.
        """
        if rider_status == 2:  # resting
            self.rider_mood[rider_idx] = min(100, self.rider_mood[rider_idx] + 5)
        else:  # working erodes mood
            self.rider_mood[rider_idx] = max(0, self.rider_mood[rider_idx] - 1)
        # Mood also tracks how performance is trending.
        performance_change = self.rider_performance[rider_idx] - self.previous_performance[rider_idx]
        if performance_change > 0:
            self.rider_mood[rider_idx] = min(100, self.rider_mood[rider_idx] + performance_change / 10)
        elif performance_change < 0:
            self.rider_mood[rider_idx] = max(0, self.rider_mood[rider_idx] + performance_change / 10)

        self.previous_performance[rider_idx] = self.rider_performance[rider_idx]

    def calculate_rest_time(self, rider_idx, is_low_performance):
        """Return (and record) the required rest duration for a rider.

        Rest grows with accumulated work time; low-performance riders rest
        half as long so they can work more.
        """
        base_rest_time = min(self.max_work_time // 10, self.rider_work_time[rider_idx] // 10 + 1) * 10
        if is_low_performance:
            self.base_rest_time[rider_idx] = int(base_rest_time * 0.5)
        else:
            self.base_rest_time[rider_idx] = base_rest_time
        return self.base_rest_time[rider_idx]

    def _get_obs(self):
        """Return rider 0's observation as a float32 vector of length 8.

        Layout: position (x, y), mood, base rest time, performance, tracked
        order amount, tracked order position (x, y).  NOTE(review): when
        ``order_idx`` is -1 (no order) this indexes the LAST order — quirk
        inherited from the original design; confirm downstream expectations.
        """
        first_rider_obs = np.hstack([
            self.riders[0],
            [self.rider_mood[0]],
            [self.base_rest_time[0]],
            [self.rider_performance[0]],
            [self.orders['amounts'][self.order_idx]],
            self.orders['locations'][self.order_idx]
        ])
        # Cast to match observation_space's declared dtype (float32).
        return first_rider_obs.astype(np.float32)

    def action_mask(self):
        """Build a per-rider action mask.

        While delivering (1) or resting (2), accepting (action 0) is marked
        -inf (forbidden); all other entries stay 0 (allowed).
        """
        mask = np.zeros((self.num_riders, self.action_space.n), dtype=np.float32)
        for i in range(self.num_riders):
            if self.rider_states[i][0] == 1 or self.rider_states[i][0] == 2:
                mask[i][0] = -np.inf  # accepting is not selectable
        return mask

    def apply_action_mask(self, actions):
        """Replace any masked-out action with the first allowed one."""
        mask = self.action_mask()
        for i in range(self.num_riders):
            if mask[i][actions[i]] == -np.inf:
                actions[i] = np.where(mask[i] != -np.inf)[0][0]
        return actions

    def update_performance(self, order_idx, rider_idx):
        """Credit the delivered order's amount to the rider's performance."""
        self.rider_performance[rider_idx] += self.orders['amounts'][order_idx]

    def decrease_performance_for_rejection(self, rider_idx):
        """Penalize a rejection by 5 points, never dropping below 0."""
        self.rider_performance[rider_idx] = max(0, self.rider_performance[rider_idx] - 5)

    def normalize_performance(self):
        """Min-max rescale all performances into [0, 100]; no-op when equal."""
        min_performance = min(self.rider_performance)
        max_performance = max(self.rider_performance)
        range_performance = max_performance - min_performance

        if range_performance == 0:
            return  # all equal: nothing to normalize (avoids division by zero)

        for i in range(len(self.rider_performance)):
            self.rider_performance[i] = ((self.rider_performance[i] - min_performance) * 100) / range_performance

    def render(self):
        pass

