import asyncio
import csv
import math
from typing import Tuple
import repast4py.space as space
from repast4py.core import Agent
from repast4py.space import DiscretePoint as dpt
from repast4py.space import ContinuousSpace
from repast4py.space import ContinuousPoint as cpt
from repast4py import context as ctx
from repast4py import schedule, logging
from mpi4py import MPI

from random import randint, randrange
import pandas as pd
import numpy as np
import json
import copy
import random
import time
import os
import sys
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from LLM.accept_order_llm import accept_order
from LLM.choose_region_llm import choose_region_llm
from MeiTuan.outlier_detection import anormal_region_detection
from MeiTuan.dispatch import two_stage_fast_heuristic
from MeiTuan.cal_utility import calUtility
from agentDesign.L1.read_map import AStarPlanner, ReadMap
from agentDesign.L2.learning_module import ImitationLearning, EvolutionaryLearning, QLearning

from initmodel.initriders import init_riders
from cognitive.rider_plan import plan_time, plan_position, check_money, trick_to_time
from MeiTuan.generateOrders import fitting_dist, orders_list

from cognitive.rider_transfer_llm import get_new_work_location
from env import *
from user import User
import math
from agentDesign.agent import Individual


#  When running the model, each newly created rider must choose: a learning method and active hours


class Rider(Individual):
    """A delivery rider agent.

    Riders commute between home and a work region, receive or refuse orders,
    walk A*-planned paths, earn money, and adapt their behaviour via one of
    three learning modes: imitation learning (0), LLM decision making (1),
    or reinforcement learning (2).
    """

    TYPE = 0  # repast4py agent type tag for Rider agents

    def __init__(self,
                 r_id: int,
                 rank: int,
                 max_orders,
                 model,
                 learning_type: int,
                 standing_time: int,
                 sleeping_time: int,
                 sex: int,
                 diligent_level: int,
                 now_speed: str,
                 now_orders,
                 home: tuple,
                 share_data: bool,
                 w1: int,
                 w2: int,
                 w3: int,
                 il_prob
                 ):
        """Build a rider and create its per-agent CSV / LLM log files.

        Args:
            r_id: unique rider id.
            rank: MPI rank owning this agent.
            max_orders: maximum number of orders held at once.
            model: the owning simulation model.
            learning_type: 0 imitation, 1 LLM, 2 reinforcement learning.
            standing_time: simulation step at which the rider starts working.
            sleeping_time: simulation step at which the rider stops working.
            sex, diligent_level, now_speed, now_orders: profile fields used in
                LLM prompts.
            home: (x, y) home coordinates.
            share_data: whether the rider shares info over the social network.
            w1, w2, w3: utility weights (income / distance / working time).
            il_prob: imitation-learning probability forwarded to Individual.
        """
        super().__init__(id=r_id, rank=rank, t=Rider.TYPE, home=home, feature=[], il_prob=il_prob)
        # Basic state: position, speed, planned route, order counters.
        self.model = model  # owning model
        self.speed = WALK_NUM
        self.route = []  # route planned by the Meituan dispatcher
        self.order_count = 0  # orders currently being handled
        self.max_orders = max_orders  # maximum simultaneous orders
        self.pt = dpt(home[0], home[1], 0)  # rider's current grid position
        self.location = (self.pt.x, self.pt.y)
        self.learning_type = learning_type  # 0 imitation learning, 1 LLM decisions, 2 reinforcement learning
        self.order_info = []  # orders currently in hand
        # Utility-function weights
        self.w1 = w1  # goal: more income
        self.w2 = w2  # goal: less travel distance
        self.w3 = w3  # goal: less working time
        self.wish_money = 100


        # Pending vs completed walking steps; completed ones can be visualised.
        self.position = []  # steps still to be walked
        self.done_position = []  # steps already walked (could be removed later)
        self.update_position = []  # steps walked during this tick, pushed to the frontend
        self.random_position = [False, init_list[self.id % 10],
                                []]  # random walk during idle time, used to encounter more orders

        self.work_region = self.id  # assigned work region
        self.move_regions = self.regions_work()  # regions reachable from here
        self.walk_time = standing_time  # step at which the rider gets up / starts work
        self.sleep_time = sleeping_time  # step at which the rider goes to sleep

        # Work status: 0 cannot take orders; 1 commuting to work or busy with
        # current orders; 2 free to take orders; 3 being transferred to another
        # region (may still take orders while transferring).
        self.status = 0
        self.routes_before = []  # route legs: (action pickup/delivery, order id, order location, ...)
        self.money = 0
        self.labor = 0  # labor cost, measured as distance travelled
        self.home = self.pt
        self.count_down = []  # steps at which held orders complete
        self.order_num = 0
        self.time_position = 0
        self.check = 0
        self.grab = 0

        self.pre_money = 0
        self.pre_labor = 0  # labor baseline for per-step deltas

        self.step_income = list()
        self.step_cost = list()

        self.max_merchant = {"sum": 0}
        self.least_merchant = {"sum": 0}
        self.one_hour_info = [0]  # first entry: money earned within the hour

        # Random-walk anchor point
        self.point_random = random.choice(random_list)[0]

        # Per-region evaluation records: row 0 income, row 1 labor
        # (row 2 apparently unused in this class -- verify elsewhere).
        self.region_info_list = [[0] * NUM_REGION for _ in range(3)]

        #  Attributes used by imitation learning
        self.feature = [self.w1, self.w2, self.w3, self.prob_il]
        self.feature_effect = [1]  # sign of each ability attribute; must match the order in feature (all positive by default)
        self.feature_normalize = []  # normalized attribute values
        self.is_imate = share_data  # whether info is shared over the network
        self.im_index = -1  # index of the neighbour being imitated (-1 = none)

        #  Attributes used by the LLM
        self.pickup_merchant = []
        self.positive_reviews_num = 0
        self.negative_reviews_num = 0
        self.total_order = 0
        # Constant profile fields
        self.sex = sex
        self.diligent_level = diligent_level
        self.now_speed = now_speed
        self.now_orders = now_orders
        self.satisfaction_rate = 0.0
        self.llm_const_info = {'id': self.id, 'sex': self.sex, 'diligent_level': self.diligent_level,
                               'now_speed': self.now_speed, 'now_orders': self.now_orders}
        # Attributes used by reinforcement learning
        self.money_region = [0 for _ in range(REGION_WORK)]
        self.money_region_now = [0 for _ in range(REGION_WORK)]
        self.q_table = np.zeros((1, REGION_WORK))

        # Attributes used by evolutionary learning
        self.dis_len = random.randint(200, 300)
        self.dis_import = random.randint(1, 8)
        self.money_import = 10 - self.dis_import
        self.order_memory = {}
        self.order_memory_Normalization = [0, 0, 0, 0]
        self.best_order_info = [-1, -1 , -1, -1, -1, -1]

        # Behaviour switches for causal analysis / macro control:
        # actions[0]: -1 accept orders normally, 0 reject all, 1 accept all;
        # actions[1]: -1 choose region normally, otherwise the forced region id.
        self.actions = [-1, -1]
        self.return_info = [self.pt.x, self.pt.y, self.walk_time, self.sleep_time] + self.best_order_info

        self.network = []  # neighbours in the social network, stored as a list

        self.RL_choose_region = QLearning()
        self.RL_random_work = QLearning()
        # Create the per-rider CSV log (next to this module).
        script_dir = os.path.dirname(os.path.abspath(__file__))
        folder = "llm_agent_log"
        folder_path = os.path.join(script_dir, folder)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        path = f"deliver_{self.id}_record_llm.csv"
        filepath_agent = os.path.join(folder_path, path)

        with open(filepath_agent, mode='w', newline='') as file:
            file.seek(0)  # NOTE(review): mode 'w' already truncates; seek/truncate look redundant
            file.truncate()
            writer = csv.writer(file)
            writer.writerow(["id", "x", "y", "time", "order_cluster", "work_status", "money", "home_x", "home_y",
                             "start_work_time", "end_work_time", "day", "total_order", "sex", "diligent_level",
                             "learn_type"])

        # Create the per-rider LLM response log.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        folder = "llm_agent_log"
        folder_path = os.path.join(script_dir, folder)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        path = f"{self.id}_response_llm.txt"
        filepath_agent = os.path.join(folder_path, path)
        with open(filepath_agent, mode='w', newline='') as file:
            file.write("start")

    def w_change(self):
        """Resample the utility weights so that w1 + w2 + w3 == 1.

        Uses rejection sampling: two uniform draws define the split, and the
        triple is kept only when every component is strictly positive.
        """
        while True:
            first = random.random()
            second = random.random()
            third = 1 - (first + second)

            if min(first, second, third) <= 0:
                continue  # invalid split, resample
            self.w1, self.w2, self.w3 = first, second, third
            self.feature = [self.w1, self.w2, self.w3, self.prob_il]
            return
    
    def region_info_update(self):
        """Attribute income and labor earned since the last call to the
        rider's current work region."""
        earned_so_far = sum(self.region_info_list[0])
        labored_so_far = sum(self.region_info_list[1])

        # Only the delta since the previous update belongs to this region.
        self.region_info_list[0][self.work_region] += self.money - earned_so_far
        self.region_info_list[1][self.work_region] += self.labor - labored_so_far


    def choose_region_il(self):
        """Imitation learning: adopt the work region of the neighbour being
        imitated, then clear the imitation target."""
        if self.im_index == -1:
            return
        target = self.neighbors[self.im_index]
        print(self.id, self.work_region, "change region success !!!" * 2, target.work_region, self.im_index)
        self.work_region = target.work_region
        self.im_index = -1  # reset until a new imitation target is picked
            
    def choose_region_rl(self):
        """Reinforcement-learning region choice: move to the region with the
        best distance-discounted Q-value advantage over the current region."""
        baseline = self.RL_choose_region.get_grid_value(self.work_region)
        scores = []
        for i in range(NUM_REGION):
            # Manhattan distance from the rider to the region centre.
            manhattan = abs(self.pt.x - Region_centre[i][0]) + abs(self.pt.y - Region_centre[i][1])
            # Farther regions are discounted more heavily.
            scores.append(self.RL_choose_region.discount_rate ** manhattan
                          * self.RL_choose_region.get_grid_value(i) - baseline)

        best = scores.index(max(scores))  # first region with the top score
        if best != self.work_region:
            print("强化学习选择区域 ！！！！", self.work_region, best)
        self.work_region = best

    def agent_calculate_score(self, agent_w1, agent_w2, agent_w3):
        """Score this rider for imitation learning; higher is better.

        The three goals are: more income (w1), less travel distance (w2) and
        less working time (w3), so the distance and time terms enter inverted.
        """
        # Average distance moved per step; fall back to 1 to avoid division by zero.
        w2 = 1 if (self.labor / (self.model.runner_step + 1)) == 0 else (self.labor / (self.model.runner_step + 1))
        # Fraction of the day spent working; fall back to 1 to avoid division by zero.
        # BUG FIX: the original stored 1/fraction here and inverted it AGAIN at
        # the return, so a LONGER shift raised the "less work time" score.
        # Store the fraction itself and invert exactly once below.
        work_frac = (self.sleep_time - self.walk_time) / ONE_DAY
        w3 = 1 if work_frac == 0 else work_frac

        # Special case: a rider stuck where no orders appear barely moves, so its
        # distance term would be tiny and everyone would imitate it and crowd an
        # orderless spot; penalise that configuration.
        if self.money == 0 and self.labor < 10:
            w2 *= 5

        return agent_w1 * (self.money / self.wish_money) + agent_w2 * (1 / w2) + agent_w3 * (1 / w3)

    def now_info_send(self):
        """Return a status snapshot for the frontend:
        [id, status, work_region, labor, money, order_num]."""
        snapshot = [self.id, self.status]
        snapshot += [self.work_region, self.labor, self.money, self.order_num]
        return snapshot

    def init_riders_network(self, other_riders):
        """Create symmetric network links with every data-sharing rider that is
        not already a neighbour."""
        for peer in other_riders:
            already_linked = peer in self.network
            if peer.is_imate and not already_linked:
                self.network.append(peer)
                peer.network.append(self)
                print(f"棋手{self.id}和{peer.id}建立联系")

    def remove_riders_network(self, other_riders):
        """Tear down the symmetric network links to the given riders."""
        for peer in other_riders:
            if peer not in self.network:
                continue
            self.network.remove(peer)
            peer.network.remove(self)
            print(f"棋手{self.id}和{peer.id}删除联系")

    def find_best_order(self):
        """Pick the order to execute: the currently dispatched one, unless a
        high-quality order in the work region pays more.

        Returns:
            (order, is_high_quality): the chosen order entity and whether it
            was grabbed from the high-quality pool.
        """
        # routes_before[-1] is the currently dispatched leg; [4] holds its payment,
        # [1] the order id.
        current_order_income = self.routes_before[-1][4]
        best_order = self.find_order_by_id(self.routes_before[-1][1])
        is_high_quality = False

        # Compare against the region's high-quality orders; keep the best payer.
        for order in self.model.meituan.high_quality_orders[self.work_region]:
            if order.money > current_order_income:
                current_order_income = order.money
                best_order = order
                is_high_quality = True
                print('决定抢单')
        if not is_high_quality:
            print('不抢单')
            self.grab = 0

        # Idiom fix: identity comparison for None instead of `!= None`.
        assert best_order is not None
        return best_order, is_high_quality

    def find_order_by_id(self, order_id):
        """Return the order entity with *order_id* from any region's normal
        order list, or None when no such order exists."""
        every_order = (order
                       for region_orders in self.model.meituan.normal_orders
                       for order in region_orders)
        return next((order for order in every_order if order.id == order_id), None)

    def process_orders(self, orders, intervention_strategy, percentage, grid, riders):
        """Autonomously execute the best available order: take it, settle its
        payment, update merchant bookkeeping and expand the route into steps."""
        self.routes_before = self.routes_before + self.route  # keep earlier legs
        best_order, is_high_quality = self.find_best_order()
        if not best_order:
            return
        print(f'骑手{self.id} 准备执行的订单是{best_order}, 金额{best_order.money}, 是否为高质量订单:{is_high_quality}')
        pickup = best_order.pickup_location
        dropoff = best_order.delivery_location
        self.best_order_info = [best_order.id, pickup[0], pickup[1], dropoff[0], dropoff[1], best_order.money]
        self.take_order(best_order, is_high_quality)
        print(f'骑手{self.id}已经执行完毕订单')
        # Settle the payment and refresh the income memory.
        self.update_money(orders, intervention_strategy, percentage,
                          self.model.time_model, self.model.runner_step, riders)
        # Update merchant stats, then turn route legs into walking steps.
        for route_info in self.routes_before:
            self.pickup_merchant_update(route_info)
        self.route_to_walk(self.runner_step, grid)

    def update_positions(self, grid, riders):
        """Move along the planned path (up to `speed` cells per tick), spend any
        leftover movement budget on a random walk, and periodically re-choose
        the work region according to the learning mode."""
        # Each tick the rider has a fixed walking budget of `speed` steps.
        walk_tim = self.speed

        if len(self.position):
            print("move have")
            self.random_position[2] = []  # restart any pending random-walk plan
            walk_len = walk_tim if len(self.position) > walk_tim else len(self.position)
            for i in range(walk_len):
                self.move(grid)
            walk_tim -= walk_len
        elif (self.status == 1 or self.status == 3) and len(self.position) == 0:
            self.status = 2  # arrived: ready to take orders

        

        #  Random walk with the leftover budget (helps encounter more orders).
        if walk_tim > 0:
            self.random_walk(walk_tim, grid, riders, self.runner_step)
        if self.actions[1] != -1: # macro control: region forced from outside
            self.work_region = self.actions[1]
        elif (not (self.status == 1 or self.status == 3)) and self.runner_step >= 10 and self.runner_step % 5 == 0: # autonomous choice every 5 steps
            if self.status != 0 and self.learning_type == 2:
                self.choose_region_rl()
            elif self.status != 0 and self.learning_type== 0:
                self.choose_region_il()

                # self.money_check(riders, self.runner_step)  # refresh current region

    def log_step_info(self):
        """Append a sparse position/money sample to the rider's CSV log.

        BUG FIX: the original opened a path relative to the current working
        directory ("llm_agent_log/...") while __init__ creates the log file
        next to this module; resolve the same absolute path here so both
        writers hit one file regardless of CWD.
        """
        if self.runner_step % 10 == 0:  # sample every 10 steps
            folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "llm_agent_log")
            filepath = os.path.join(folder_path, f"deliver_{self.id}_record_llm.csv")
            with open(filepath, 'a', newline='') as file:
                writer = csv.writer(file)
                writer.writerow([self.id, self.pt.x, self.pt.y, self.runner_step, self.money])

    def take_order(self, order, is_high_quality):
        """Accept *order*: append it to the route and book its income, rolling
        everything back if an ordinary order ends up being refused.

        Args:
            order: the order entity to execute.
            is_high_quality: True when the order was grabbed from the
                high-quality pool.
        """
        self.route.append((order.pickup_location, order.delivery_location))
        self.money += order.money  # book the income
        self.order_count += 1
        if is_high_quality:
            # High-quality order: drop the ordinary entry the model appended and
            # tag the previous leg with the region and payment instead.
            self.route.pop()  # remove the ordinary order added in mymodel
            self.route[-1] = self.route[-1] + (order.region, order.money)
            self.best_order_info = [-1, -1, -1, -1, -1, -1]
            print("处理的是高质量订单，已加入骑手路线")
        else:
            # Ordinary order: locate it in the current work region.
            # (loop variable renamed -- the original shadowed the builtin `ord`)
            region_order = next((o for o in self.model.meituan.normal_orders[self.work_region] if o.id == order.id), None)
            if region_order:
                if len(self.routes_before) > 0 and self.runner_step % 10 == 0:
                    # Ask choose_order whether this ordinary order is kept.
                    accepted, rejected = self.choose_order({region_order.region: [region_order]}, self.runner_step,
                                                           self.model.meituan.normal_orders)
                    if region_order.id in accepted:
                        # Accepted: the route entry added in mymodel stands as-is.
                        print("处理的是普通订单，已加入骑手路线")
                    else:
                        # Rejected: roll back route, income and order count.
                        print(f"普通订单 {region_order.id} 被拒绝，不加入骑手路线")
                        self.route.pop()
                        self.money -= order.money
                        self.order_count -= 1
                        self.best_order_info = [-1, -1, -1, -1, -1, -1]
                else:
                    print("处理的是普通订单，已加入骑手路线")

    def update_show_info(self):
        """Print the rider's current step, position and money (debug output)."""
        message = f"Step: {self.runner_step}, Position: {self.pt.x}, {self.pt.y}, Money: {self.money}"
        print(message)

    def income_cost_collection(self):
        """Record this step's income/labor deltas and roll the baselines."""
        delta_income = self.money - self.pre_money
        delta_cost = self.labor - self.pre_labor
        self.step_income.append(delta_income)
        self.step_cost.append(delta_cost)

        # New baselines for the next step's deltas.
        self.pre_money = self.money
        self.pre_labor = self.labor

    def update_now_orders(self):
        """Refresh the rider's in-hand order list from the planned route.

        Orders are appended in route order (matching `self.route`); each pickup
        leg is resolved first against the normal-order dict, then the
        high-quality one.  (Idiom fix: membership tested on the dict itself
        instead of `.keys()`.)
        """
        for info in self.route:
            if info[0] == 'pickup':
                if info[1] in self.model.meituan.normal_orders_dict:
                    self.now_orders.append(self.model.meituan.normal_orders_dict[info[1]])
                elif info[1] in self.model.meituan.highQ_orders_dict:
                    self.now_orders.append(self.model.meituan.highQ_orders_dict[info[1]])

    def step(self, intervention_strategy, percentage, time_model, grid, orders, riders, order_id, runner_step,
                   eat_list):
        """Advance the rider by one simulation step: normalise route entries,
        commute, dispatch/settle orders, learn, move, log, and roll the daily
        schedule.

        Returns:
            [x, y, walk_time, sleep_time, order_id, pickup_x, pickup_y,
             delivery_x, delivery_y, money] -- the order part is all -1 when no
            order is held.
        """
        self.update_now_orders()
        self.best_order_info = [-1, -1, -1, -1, -1, -1]  # reset best-order info
        # Refresh the step counter and clear positions sent to the frontend.
        self.runner_step = runner_step
        self.update_position = []
        # Expand bare (action, order_id) route entries into full legs.
        for index, info in enumerate(self.route):
            if len(info) == 2:
                # BUG FIX: membership must be tested against the dict itself,
                # not against one of its values.
                order_now = self.model.meituan.normal_orders_dict[info[1]] if info[1] in self.model.meituan.normal_orders_dict else self.model.meituan.highQ_orders_dict[info[1]]
                location_order = order_now.pickup_location if info[0] == 'pickup' else order_now.delivery_location
                # BUG FIX: splice the target coordinates into the tuple -- the
                # original added an int region to a tuple (TypeError) and never
                # used `location_order`.  Resulting shape matches route_to_walk:
                # (action, order_id, x, y, region, money).
                self.route[index] = info + (location_order[0], location_order[1],
                                            order_now.region,
                                            order_now.money)

        # Head to the work location once the start-of-work step arrives.
        if int(self.model.runner_step) == self.walk_time:
            self.start_walk(self.random_position[1][0], self.random_position[1][1])  # leave home
            self.status = 1  # commuting to the work location

        # Dispatch logic, gated by the macro-control action:
        # actions[0] == 0 reject all, == 1 accept all, otherwise decide freely.
        if self.status == 2  and self.route:
            print(f'骑手{self.id}，可以执行订单')
            if self.actions[0] == 0:
                self.routes_before = self.routes_before + self.route  # earlier legs
                for info in self.routes_before: # rejected orders become unprocessed again
                    self.model.meituan.normal_orders_dict[info[1]].status = "unprocessed"
                # BUG FIX: assign two DISTINCT lists (the original chained
                # assignment aliased both attributes to one list object).
                self.routes_before = []
                self.route = []
                self.best_order_info = [-1, -1, -1, -1, -1, -1]

            elif self.actions[0] == 1:
                self.routes_before = self.routes_before + self.route
                print("接单")
                # Settle income and refresh the income memory.
                self.update_money(orders, intervention_strategy, percentage,
                              self.model.time_model, self.model.runner_step, riders)
                # Update merchant stats, then expand legs into walking steps.
                for route_info in self.routes_before:
                    self.pickup_merchant_update(route_info)
                self.route_to_walk(runner_step, grid)

            else:
                print("自主决策")
                self.process_orders(orders, intervention_strategy, percentage, grid, riders)

        if self.status == 3:
            if len(self.position) == 0:
                self.status = 2

        # Report the first order currently held.
        if self.now_orders:
            order_tmp = self.now_orders[0]
            self.best_order_info = [order_tmp.id, order_tmp.pickup_location[0], order_tmp.pickup_location[1], order_tmp.delivery_location[0], order_tmp.delivery_location[1], order_tmp.money]

        # Learning updates every 5 steps.
        if self.learning_type == 0 and runner_step % 5 == 0:  # imitation learning
            if len(self.network) != 0:
                self.network_imit(riders)
            else:
                self.random_point_imit(riders)
        elif self.learning_type == 2 and runner_step % 5 == 0:  # reinforcement learning
            self.random_point_rl()

        self.update_positions(grid, riders)  # walk / random-walk / region choice

        self.log_step_info()

        # Expire orders whose completion time is this step.
        if self.runner_step in self.count_down:
            self.order_count -= self.count_down.count(self.runner_step)
            self.now_orders.pop(0)
            self.count_down = [time for time in self.count_down if time != self.runner_step]

        # Switch to resting and head home once the shift is over.
        if int(runner_step) > self.sleep_time and self.status != 0:
            self.status = 0
            self.start_walk(self.home.x, self.home.y)  # go home
        if int(runner_step) > self.sleep_time and self.status == 0 and len(self.position) == 0: # roll the schedule to the next day
            self.walk_time += ONE_DAY
            self.sleep_time += ONE_DAY

        self.return_info = [self.pt.x, self.pt.y, self.walk_time, self.sleep_time]
        self.return_info += self.best_order_info
        # BUG FIX: two distinct empty lists instead of an aliasing chained assignment.
        self.route = []
        self.routes_before = []

        self.income_cost_collection()

        return self.return_info

    def money_check(self, riders, runner_step):
        """Re-choose the work region when this rider earns below the average."""
        average = sum(rider.money for rider in riders) / len(riders)
        if self.money < average:
            self.choose_region(runner_step, riders)

    def regions_work(self):
        """Return the indices of regions whose centre lies within 100 units of
        the rider's current position (i.e. regions it can serve).

        NOTE(review): the original comment claimed a 500-unit radius while the
        code uses 100 -- confirm which is intended.
        """
        region_work = []
        now_pt = self.now_position()
        for i in range(len(Region_info)):
            x, y = mean_point(Region_info[i])
            # Euclidean distance; the original's abs(...) ** 2 was redundant.
            dis = math.hypot(now_pt.x - x, now_pt.y - y)
            if dis < 100:
                region_work.append(i)
        return region_work

    def action_log(self, runner_step, action, money, next_location, after_time):
        """Hook for logging a single action; intentionally a no-op for now."""
        pass

    def llm_info_log(self, runner_step, time_model):
        """Append a full status row to the rider's per-agent CSV log.

        BUG FIX: the original opened a path relative to the current working
        directory while __init__ creates the log file next to this module;
        build the same absolute path so both writers target one file.
        """
        folder_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "llm_agent_log")
        path = os.path.join(folder_path, f"deliver_{self.id}_record_llm.csv")

        with open(path, mode='a', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([self.id, self.location[0], self.location[1], runner_step, 0, self.work_status(runner_step),
                             int(self.money), self.home.x, self.home.y, self.walk_time, self.sleep_time,
                             int(runner_step) // ONE_DAY + 1, self.total_order, self.sex, self.diligent_level, "llm"])

    def choose_order(self, orders, runner_step, orders_all):
        """Decide which of the offered orders to keep.

        Args:
            orders: mapping region -> list of offered order entities.
            runner_step: current simulation step.
            orders_all: all regions' order lists (passed to remove_order).

        Returns:
            (accept, reject): lists of order ids.

        BUG FIX: the original appended every order to `accept` once per removed
        order it did NOT match, so accept contained duplicates AND rejected ids
        (the caller tests `id in accepted`), and accepted nothing at all when
        the refusal list was empty.  Build the rejected set first, then accept
        everything else exactly once.
        """
        accept = []
        reject = []
        removed = self.remove_order(runner_step, orders_all)  # (order_id, region) pairs to drop
        rejected_ids = {order_id for order_id, region in removed}
        for region, region_orders in orders.items():
            for order in region_orders:
                if order.id in rejected_ids:
                    order.status = 'unprocessed'  # release it back to the pool
                    reject.append(order.id)
                else:
                    accept.append(order.id)
        return accept, reject
        #  判断订单是否接受， 返回拒绝的订单

    def remove_order(self, runner_step, orders_all):
        """Return the (order_id, region) pairs the rider refuses, pruning the
        matching legs from `routes_before`.

        BUG FIXES: the original tested `learning_type != (0 or 10)`, which
        evaluates to `!= 0` (`0 or 10` is 10... actually the expression yields
        10 only as `or`'s first truthy operand -- the intent was "neither 0
        nor 10"); and the imitation branch read the non-existent attribute
        `self.learn_type` (AttributeError).
        """
        if self.learning_type not in (0, 10):
            remove_info = self.remove_info_llm(runner_step, orders_all)
            for info_order in remove_info:
                order_id = info_order[0]
                # Drop every route leg that belongs to the refused order.
                self.routes_before = [info for info in self.routes_before if info[1] != order_id]
            return remove_info
        elif self.learning_type == 0:
            # Imitation-learning refusal is a stub; nothing is removed yet.
            self.remove_info_il()
        return []

    def remove_info_il(self):
        """Order-refusal rule for imitation learning; not implemented yet."""
        pass

    def remove_info_llm(self, runner_step, orders_all):
        """Ask the LLM, for every pending pickup leg, whether to refuse the order.

        Returns a list of (order_id, region) tuples for refused orders.  Any
        LLM failure falls back to keeping the order (best effort).
        """
        refused = []
        for order_info in self.routes_before:
            order_id = order_info[1]
            orders = orders_all[order_info[4]]  # region's order list (kept for parity with original)
            if order_info[0] != 'pickup':
                continue
            try:
                keep = accept_order(self.llm_const_info, runner_step,
                                    self.model.meituan.normal_orders_dict[order_id].money, order_info,
                                    self.location,
                                    self.positive_reviews_num, self.negative_reviews_num)
                if not keep:
                    refused.append((order_info[1], order_info[4]))
                    self.order_count -= 1
            except Exception:
                print("llm接单判断出错，已默认接单")
                continue
        return refused

    def update_money(self, orders_all, intervention_strategy, percentage, time_model, runner_step, riders):
        """Settle payment for every pickup leg and feed the learners.

        intervention_strategy -- 1: split each payment evenly among all riders;
        2: rider keeps the full payment; otherwise: rider keeps
        `percentage` of it and the remainder is split evenly.

        (Cleanup: the original computed unused locals `get_money` and
        `hour_money`; they are removed here.)
        """
        for route_info in self.routes_before:
            if route_info[0] == 'delivery':  # pay each order once, on its pickup leg only
                continue
            order_id = route_info[1]
            # Index the region's orders by id for O(1) lookup.
            orders = {order.id: order for order in orders_all[route_info[4]]}
            print("gan yu ce lue: ", intervention_strategy)
            average = 0
            performance = 0
            if intervention_strategy == 1:
                print("ping jun ce lue")
                average = orders[order_id].money / len(riders)
                for rider in riders:
                    rider.money += average
                    if rider.id != self.id:
                        # Other riders' windfall also informs this rider's Q-learners.
                        self.RL_choose_region.learn(rider.work_region, rider.location, rider.location, average, 0)
                        self.RL_random_work.learn(rider.location, rider.location,  rider.location, average,0)

            elif intervention_strategy == 2:
                print("ji xiao")
                self.money += orders[order_id].money
            else:
                print("ji xiao + ping jun ce lue")
                performance = orders[order_id].money * percentage
                self.money += performance
                average = (orders[order_id].money - performance) / len(riders)
                for rider in riders:
                    rider.money += average
                    if rider.id != self.id and percentage < 1:
                        self.RL_choose_region.learn(rider.work_region, rider.location, rider.location, average, 0)
                        self.RL_random_work.learn(rider.location, rider.location,  rider.location, average,0)

            # This rider's own reward update.
            reward = average + performance
            print(" 测试 id ！！！！！！！！！！！！！！！！！", self.id, route_info, self.runner_step, self.actions)
            self.RL_choose_region.learn(self.work_region, self.location,  orders[order_id].delivery_location, reward, len(self.route))
            self.RL_random_work.learn(self.location, self.location,  orders[order_id].delivery_location, reward, len(self.route))

            # Memory used by evolutionary learning: accumulate income per pickup
            # location and track the normalization bounds.
            key = f'{orders[order_id].pickup_location}'
            if key in self.order_memory:
                self.order_memory[key][0] += orders[order_id].money
                if self.order_memory_Normalization[1] < self.order_memory[key][0]:
                    self.order_memory_Normalization[1] = self.order_memory[key][0]
            else:
                self.order_memory[key] = [orders[order_id].money,
                                          orders[order_id].pickup_location]
                if self.order_memory_Normalization[0] > self.order_memory[key][0]:
                    self.order_memory_Normalization[0] = self.order_memory[key][0]

    def route_to_walk(self, runner_step, grid_now):
        """Expand every pending route leg into concrete walking steps via A*,
        collect user reviews at destinations and update merchant statistics.

        Args:
            runner_step: current simulation step (used for completion times).
            grid_now: the grid to query for the user at the destination.
        """
        while len(self.routes_before):
            # Stop working once the planned walking time exceeds the shift length.
            if self.time_position >= abs(self.sleep_time - self.walk_time) * WALK_NUM:
                self.status = 0
            now_pt = self.now_position()  # rider's current position

            a_star_walk = AStarPlanner(now_pt.x, now_pt.y, self.routes_before[0][2],
                                       self.routes_before[0][3])  # A* from current point to the leg target
            walks = a_star_walk.planning()  # planned path as ([xs], [ys])

            self.time_position += len(walks[0])  # accumulate planned walking time
            # Record the leg in the order-message log.
            self.update_order_message(self.routes_before[0])

            # Review system: the user at the destination rates the rider.
            pt_test = dpt(self.routes_before[0][2], self.routes_before[0][3], 0)
            user = grid_now.get_agent(pt_test)
            if user and user.TYPE == User.TYPE:
                ans = user.evaluate_rider(len(walks))
                print(str(user.id) + "评价骑手" + str(self.id) + str(ans))
                self.total_order += 1
                if ans:
                    self.positive_reviews_num += 1
                else:
                    self.negative_reviews_num += 1

            # Record visited merchants: accumulate path length per destination.
            # BUG FIX: the lookup key must use the same tuple formatting as the
            # keys written below; the original formatted a *list* here
            # ("[x, y]" vs "(x, y)"), so the test never matched and existing
            # counters were overwritten instead of summed.
            if f"{self.routes_before[0][2], self.routes_before[0][3]}" in self.least_merchant:
                self.least_merchant[f"{self.routes_before[0][2], self.routes_before[0][3]}"] += len(walks[0])
            else:
                self.least_merchant[f"{self.routes_before[0][2], self.routes_before[0][3]}"] = len(walks[0])
            self.least_merchant["sum"] += len(walks[0])

            # Append every waypoint as one pending movement step.
            for i in range(len(walks[0])):
                self.position.append([walks[0][i], walks[1][i]])
            # A non-pickup leg means a delivery completes at the end of the path.
            if self.routes_before[0][0] != 'pickup':
                self.order_num += 1
                find_merchant = f"{self.routes_before[0][2], self.routes_before[0][3]}"
                self.max_merchant[find_merchant] = 1 if find_merchant not in self.max_merchant \
                    else self.max_merchant[find_merchant] + 1
                self.max_merchant["sum"] += 1
                # Expected completion step of the order.
                temp_time = int(runner_step) + math.ceil(len(self.position) / WALK_NUM)
                self.count_down.append(temp_time)
            # Leg fully expanded; move on to the next one.
            self.routes_before.pop(0)

    #  返回当前状态
    def work_status(self, runner_step):
        if not (self.walk_time < runner_step < self.sleep_time):
            status_now = "b"  # 休息
            return status_now
        elif self.random_position[0]:
            status_now = "b"  # 游走
            return status_now
        else:
            status_now = "a"  # 派单
            return status_now

    def update_order_message(self, route_info):
        """Log one route leg as [arrival-step, action, order-id]."""
        arrival_step = int(self.time_position / self.speed)
        self.order_info.append([arrival_step, route_info[0], route_info[1]])

    def order_message(self, runner_step):
        i = 0
        while i < len(self.order_info):
            if runner_step <= self.order_info[i][0]:
                break
            elif i + 1 < len(self.order_info) and runner_step > self.order_info[i][0] and runner_step > \
                    self.order_info[i + 1][0]:
                self.order_info.pop(i)
                i -= 1
            else:
                break
            i += 1
        if len(self.order_info):
            # return str(f"订单{self.order_info[0][2]}：{self.order_info[0][1]}")
            return self.order_info[0], self.money, self.order_num
        else:
            return 0, self.money, self.order_num

    def get_neighbors(self, riders):
        # 获得邻居的信息
        rider_neighbors = []
        # for rider in riders:
        #     if rider != self and points_dis((rider.pt.x, rider.pt.y), (self.pt.x, self.pt.y)) < 100:
        #         rider_neighbors.append(rider)
        for rd in self.network:
            if rd.id != self.id:
                rider_neighbors.append(self.model.riders_list[rd.id])
        return rider_neighbors

    def get_region_reward(self, region_num):
        money_before = self.money - sum(self.money_region)
        self.money_region_now[self.work_region] = money_before
        return self.money_region_now[region_num] / (sum(self.money_region) + 0.1)

    def choose_action(self, state):
        if random.uniform(0, 1) < self.epsilon:
            return random.choice(range(REGION_WORK))
        else:
            return np.argmax(self.q_table[state, :])

    # def random_point_rl(self):
    #     current_action = self.work_region
    #     for episode in range(50):
    #         action = self.choose_action(0)
    #         reword = self.get_region_reward(action)
    #         next_status = 0
    #         self.rl(0, action, reword, next_status)
    #     print("强化学习Q表", self.q_table)
    #     self.work_region = np.argmax(self.q_table)

    def random_point_imit(self, riders):
        """Update the wander target by imitating the chosen neighbour.

        Requires at least one neighbour; imit_step() picks which neighbour
        to copy (-1 means no imitation, leaving the target unchanged).
        """
        print("befor !!!!!!!!!!!!!!!!", self.random_position)
        neighbors = self.get_neighbors(riders)
        if neighbors:  # imitation needs at least one neighbour
            self.neighbors = neighbors
            index = self.imit_step()
            print('imit success')
            print("学习成功", self.feature, sum(self.feature), self.id, index, self.neighbors[index].location, self.location)
            self.im_index = index
            if index != -1:
                # copy the chosen neighbour's current grid position
                target = self.neighbors[index].now_position()
                self.point_random = (target.x, target.y)
        print("end !!!!!!!!!!!!!!!!", self.random_position)
    
    def random_point_rl(self):
        """Pick the best road cell to wander to using the value table held by
        RL_random_work, then plan an A* route there.

        For each road cell, the candidate score is the cell's learned value
        discounted by the Manhattan travel distance, minus the value of the
        current cell; the cell with the largest increment wins.  Side
        effects: sets random_position (active flag, target, route),
        point_random.
        """
        roads = read_map.get_door("道路")[0]
        best_grid, best_value = self.location, -100
        current_value = self.RL_random_work.get_grid_value(self.location)
        for grid in roads:
            # Manhattan distance used as the travel duration for discounting
            duration = abs(self.pt.x - grid[0]) + abs(self.pt.y - grid[1]) 
            discount = math.pow(self.RL_random_work.discount_rate, duration)
            proposed_value = self.RL_random_work.get_grid_value(grid)
            incremental_value = discount * proposed_value - current_value
            if incremental_value > best_value:    

                best_grid, best_value = grid, incremental_value

        random_route = []
        # presumably (-1, -1) marks an invalid/absent target — TODO confirm
        if best_grid[0] !=-1 and  best_grid[1] != -1:    
            self.random_position[0] = True
            self.random_position[1] = best_grid
            a_star = AStarPlanner(self.pt.x, self.pt.y, best_grid[0], best_grid[1])
            route = a_star.planning()
            for i in range(len(route[0])):
              random_route.append([route[0][i], route[1][i]]) 
            self.random_position[2] = random_route
            self.point_random = best_grid
            print("调用强化学习 ！！！！！！！！！！！！！！！！！！！！！！！！！！！！！！", best_grid, self.location, best_value)

    


    # Once a network is attached, imitation learns from network neighbours' features.
    def network_imit(self, riders):
        """Imitation learning over network neighbours; updates the wander target.

        imit_step() selects the neighbour to copy (-1 means none, leaving
        the target unchanged).
        """
        neighbors = self.get_neighbors(riders)
        if not neighbors:  # imitation needs at least one neighbour
            return
        self.neighbors = neighbors
        index = self.imit_step()
        self.im_index = index
        if index != -1:
            # copy the chosen neighbour's current grid position
            target = self.neighbors[index].now_position()
            self.point_random = (target.x, target.y)
        print('imit success', self.feature, sum(self.feature), self.id, index, self.neighbors[index].id, self.neighbors[index].location, self.point_random, self.location)

    def now_fitness(self, time_model):
        """
            用于进化学习，计算适应程度，通过计算每小时的收益来判断
        """
        if time_model["hour"] > 0:
            check_time = time_model["hour"] - 1
        else:
            check_time = time_model["hour"]

        print("fit e:", int(self.info["time_values"][str(check_time)]))
        return int(self.info["time_values"][str(check_time)]) \
            if int(self.info["time_values"][str(check_time)]) > 0 else -1

    def get_chromosome(self):
        """
            用于进化学习，返回个体的染色体
        """
        return [self.dis_import, self.dis_len]

    def update_chromosome(self, new_chromosome):
        self.dis_import = new_chromosome[0]
        self.dis_len = new_chromosome[1]
        self.money_import = 10 - int(self.dis_import)


    def random_point_llm(self):
        """
        Ask the LLM for a new wander/work location.

        Feeds the static rider profile, visited-merchant history, current
        location and review counts to the LLM helper and stores the result
        as the wander target (point_random).
        """
        self.point_random = get_new_work_location(self.llm_const_info, self.pickup_merchant, self.location,
                                                        self.positive_reviews_num, self.negative_reviews_num)

    def pickup_merchant_update(self, route_info):
        """
        更新获取商家位置的信息
        """
        merchant_location = (route_info[2], route_info[3])
        ok = False
        for i in range(len(self.pickup_merchant)):
            if self.pickup_merchant[i].get("location") == merchant_location:
                times_tmp = self.pickup_merchant[i].get("times")
                self.pickup_merchant[i].update(times=times_tmp + 1)
                ok = True
                break
        if not ok:
            self.pickup_merchant.append({"times": 1,
                                         "location": merchant_location})

    def now_position(self):
        #  返回当前的位置
        if len(self.position):
            return dpt(self.position[-1][0], self.position[-1][1], 0)
        else:
            return self.pt

    def planned_position(self):
        #  已经规划过的路径
        if len(self.routes_before):
            print("route", self.routes_before)
            return dpt(self.routes_before[-1][2], self.routes_before[-1][3], 0)
        else:
            return self.now_position()

    def random_walk(self, walk_tim, grid, riders, runner_step):
        """Advance the rider up to walk_tim steps of wandering movement.

        While the rider is in normal work (status 0 or 1) no wandering
        happens and the position logs are padded with [-1, 0] markers.
        Otherwise, if no wander route is in progress (and the learning type
        is not 7), a route toward point_random is planned (LLM target for
        learning_type 10, then A*), and up to walk_tim steps of the stored
        route are executed via move().
        """
        #  Randomly generate a movement trajectory:
        #  if previously acting normally, plan a path; if already mid-plan,
        #  keep following the stored positions; refresh when finished or
        #  when orders start arriving.
        if self.status in [0, 1]:  # no wandering needed while working
            self.done_position.extend([-1, 0] for _ in range(walk_tim))
            self.update_position.extend([-1, 0] for _ in range(walk_tim))
            self.time_position += walk_tim
            return
        if self.location != self.point_random:
            print("随机游走 ！！！！！！！！", len(self.random_position[2]), self.point_random, self.location, self.random_position[1])

        if len(self.random_position[2]) == 0 and self.learning_type != 7:  # wandering not initialised yet and learning_type is rule-based
            #  plan the route
            if self.learning_type == 10:
                self.random_point_llm()
            # print("after learning:", self.random_position[1])
            now_pt = self.now_position()
            random_node = self.point_random  # target point
            a_star_walk = AStarPlanner(now_pt.x, now_pt.y, random_node[0], random_node[1])
            route = a_star_walk.planning()
            if route is None:
                # If the LLM produced an invalid transfer point, cancel the
                # transfer by targeting the current position instead.
                print("LLM transfer failed!!!")
                a_star_walk = AStarPlanner(now_pt.x, now_pt.y, now_pt.x, now_pt.y)
                route = a_star_walk.planning()
            self.random_position[2] = []
            for i in range(len(route[0])):
                self.random_position[2].append([route[0][i], route[1][i]])
        while walk_tim:
            
            if len(self.random_position[2]) == 0:
                # route exhausted: pad the remaining steps and stop
                self.done_position.extend([-1, 0] for _ in range(walk_tim))
                self.update_position.extend([-1, 0] for _ in range(walk_tim))
                self.time_position += walk_tim
                break
            self.position.append(self.random_position[2][0])
            self.move(grid)
            self.random_position[2] = self.random_position[2][1:]
            self.time_position += 1
            walk_tim -= 1

    def save(self) -> Tuple:
        return (
            self.uid, self.pt.coordinates, self.route, self.order_count, self.status, self.done_position, self.position,
            self.count_down, self.order_num)

    def start_walk(self, x, y):
        """Plan an A* route from the current point (home) to the work point
        (x, y) and queue every step onto the pending position list."""
        planner = AStarPlanner(self.pt.x, self.pt.y, x, y)
        route = planner.planning()
        steps = len(route[0])
        self.time_position += steps
        self.position.extend([route[0][i], route[1][i]] for i in range(steps))

    def move(self, grid):
        """Advance one step along the queued positions.

        Updates pt/location to the head of the queue, logs the step in
        done_position and update_position, and accrues a small amount of
        labor (fatigue).  `grid` is unused here but kept for interface
        compatibility.
        """
        self.labor += 0.01
        head = self.position[0]
        self.pt = dpt(head[0], head[1], 0)
        self.location = (self.pt.x, self.pt.y)
        self.done_position.append(head)
        self.update_position.append(head)
        self.position = self.position[1:]

    def update_region(self):  # work_region drives dispatch; keep it fresh
        """Refresh work_region from the rider's current grid position."""
        here = self.now_position()
        self.work_region = get_which_region((here.x, here.y))

    def choose_region(self, runner_step, riders):
        """Pick the region to take orders from; re-evaluated every 5 steps.

        Strategy depends on learning_type: 0 = imitation (network-based
        when a network exists), 2 = reinforcement learning, 10 = LLM
        choice.  Any failure falls back to a random candidate region.
        Returns the (possibly unchanged) work_region.
        """
        if runner_step % 5 != 0:
            return self.work_region
        candidates = [i for i in range(5) if i < REGION_WORK]
        if candidates:
            try:
                if self.learning_type == 0:
                    if len(self.network) != 0:
                        self.network_imit(riders)
                    else:
                        self.random_point_imit(riders)
                    self.work_region = self.feature[1]
                elif self.learning_type == 2:
                    self.random_point_rl()
                    self.choose_region_rl()
                elif self.learning_type == 10:
                    choice = choose_region_llm(
                        self.llm_const_info, candidates, self.location,
                        self.positive_reviews_num, self.negative_reviews_num)
                    self.work_region = int(choice)
                print("选择区域成功")
            except Exception:
                print("llm选择商区失败，已随机选择商区")
                self.work_region = random.choice(candidates)
        return self.work_region

    def info_get(self, runner_step):
        status_now = self.work_status(runner_step)
        return [self.pt.x, self.pt.y, runner_step, status_now, self.money]