import asyncio
import csv
import math
from typing import Tuple
import repast4py.space as space
import websocket
from repast4py.core import Agent
from repast4py.space import DiscretePoint as dpt
from repast4py.space import ContinuousSpace
from repast4py.space import ContinuousPoint as cpt
from repast4py import context as ctx
from repast4py import schedule, logging
from mpi4py import MPI
import ast
import shutil
from random import randint, randrange
import pandas as pd
import numpy as np
import json
import copy
import random
import time
import os
import sys
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn as sns
# from spire.xls import Workbook
# from spire.xls.common import ImageFormat

from MeiTuan.outlier_detection import anormal_region_detection
from MeiTuan.dispatch import two_stage_fast_heuristic
from MeiTuan.cal_utility import calUtility
from agentDesign.L1.read_map import AStarPlanner, ReadMap
from emergence.mirco_to_macro import draw_network_graph, save_degree_distribution_plot, update_analysis, update_graph_data
from initmodel.initriders import init_riders
from cognitive.rider_plan import plan_time, plan_position, check_money, trick_to_time
from MeiTuan.generateOrders import fitting_dist, orders_list
from agentDesign.L2.learning_module import ImitationLearning, EvolutionaryLearning

from cognitive.rider_transfer_llm import get_new_work_location
from LLM.accept_order_llm import accept_order
from LLM.choose_region_llm import choose_region_llm
from env import (door_list_random, get_which_region, WALK_NUM, ONE_DAY, ONE_HOUR, ONE_MIN, Region_info, REGION_LINE,
                 REGION_WORK, mean_point, points_dis, NUM_REGION, distance_matrix, read_map, user_list_random,
                 home_list, all_door_list)
from meituan import Platform
from governance import Governance
from user import User
from npc import Npc
from rider import Rider
from order import Order
import openpyxl
from openpyxl.styles import Alignment, Font, PatternFill, Border
import logging
from openpyxl.utils import get_column_letter
from openpyxl.drawing.image import Image
from openpyxl.drawing.xdr import XDRPoint2D, XDRPositiveSize2D
from openpyxl.drawing.spreadsheet_drawing import AnchorMarker, OneCellAnchor
from openpyxl.utils.units import pixels_to_EMU
from collections import Counter
from regulation.dispatch.base_dispatch import *
from regulation.reposition.base_reposition import *


logger = logging.getLogger('error')

def generate_random_numbers():
    """Draw three strictly positive random weights that sum to exactly 1.

    Rejection-samples two uniform [0, 1) values, derives the third as the
    remainder, and retries until all three components are strictly positive.
    """
    while True:
        first = random.random()
        second = random.random()
        remainder = 1 - (first + second)
        if min(first, second, remainder) > 0:
            return first, second, remainder

# Image-insertion helper
def insert_image(worksheet, start_row, start_col, height, image_url, image_size=None):
    """
    Insert an image into a worksheet, scaled to fit and roughly centered
    inside a vertically merged cell range.

    :param worksheet:  openpyxl Worksheet object
    :param start_row:  first row of the merged range
    :param start_col:  target column (1-based)
    :param height:     number of merged rows the image should span
    :param image_url:  path to the image file
    :param image_size: display bounds; per the math below, image_size[0] is the
                       max width and image_size[1] the fixed height.
                       NOTE(review): fill_report() passes (height, width) —
                       confirm the intended order. Must not be None.
    :return: None (errors are logged and swallowed)
    """
    try:
        # pixel -> EMU converter (Excel drawing offsets are measured in EMU)
        p2e = pixels_to_EMU
        # load the image
        img = Image(image_url)
        # original aspect ratio (width / height)
        per = img.width / img.height
        # fix the height at image_size[1]; derive the width from the ratio
        shr_image_width = image_size[1] * per  # w:h     shr_image_width:80
        # candidate (width, height)
        img_size = shr_image_width, image_size[1]
        # if the derived width exceeds the max width, fix the width instead
        if shr_image_width > image_size[0]:
            shr_image_height = image_size[0] / per  # w:h     150:shr_image_hieght
            # final (width, height)
            img_size = image_size[0], shr_image_height
        # NOTE(review): img_size is (width, height) but is assigned into
        # (img.height, img.width) — from here on img.height holds the display
        # WIDTH and img.width the display HEIGHT. All later uses below rely on
        # this swap, so they are internally consistent; confirm before "fixing".
        img.height, img.width = img_size
        # drawing extent of the image (cx = img.height = width, cy = img.width = height)
        size = XDRPositiveSize2D(p2e(img.height), p2e(img.width))
        # horizontal centering within the column
        col_letter = get_column_letter(start_col)  # locate the column
        width = worksheet.column_dimensions[col_letter].width
        _col = int((width * 8 - img.height) / 2)  # horizontal offset (img.height = display width, see swap above)
        # vertical centering (the range spans several merged rows)
        h2_lst = []  # cumulative height after each row
        acc_height = 0
        for row in range(start_row, start_row + height):
            h = worksheet.row_dimensions[row].height
            if not h:
                # rows with no explicit height get Excel's 15-point default
                worksheet.row_dimensions[row].height = 15
                acc_height += 15
                h2_lst.append(int(acc_height * 20 / 15))
            else:
                acc_height += h
                h2_lst.append(int(acc_height * 20 / 15))
        h2_lst.insert(0, 0)  # cumulative list starts at 0
        top = int((h2_lst[-1] - img.width) / 2)  # distance from the top (img.width = display height, see swap above)
        pos = []
        for nid, ele in enumerate(h2_lst):
            if top < ele:
                pos.append((nid, top))
        # NOTE(review): if pos is empty (image taller than the merged range) the
        # next line raises IndexError, which the except below silently logs.
        row = start_row + pos[0][0] - 1  # row the anchor lands in
        index = pos[0][0]
        _row = pos[0][1] - h2_lst[index]  # vertical offset within that row
        # anchor at (col, row) plus EMU offsets inside the cell
        marker = AnchorMarker(col=start_col - 1, colOff=p2e(_col), row=row, rowOff=p2e(_row))
        img.anchor = OneCellAnchor(_from=marker, ext=size)
        worksheet.add_image(img)
    except Exception as e:
        # best-effort: report insertion failures but never break report generation
        logger.error(e)
        pass


#  TODO: configure the number of riders, the network, the learning method and learning attributes in the model


class Model:
    def __init__(self,
                 run_time: int,  # experiment run number, used to index stored data
                 comm: MPI.Intracomm,
                 websocket,
                 rider_len: int,  # number of riders
                 sleep_time: list,  # per-rider rest-time list
                 walk_time: list,  # per-rider walking-time list
                 # intervention strategy: 1 = equal split, 2 = performance, 3 = equal + performance
                 intervention_strategy: int,
                 # ratio between the equal and performance components
                 percentage: float,
                 learning_types: list,  # per-rider learning type
                 array_sex=None,  # per-rider sex array (optional)
                 array_diligent=None,  # per-rider diligence array (optional)
                 regulation_type=None,  # regulation type (optional)
                 network_type='ER',
                 prob=0.8,  # network connection probability
                 reposition_type=1,  # 1 = with re-dispatch, 0 = anomaly detection only
                 BA_add_edges=5,  # edges added per new node in a BA network
                 WS_init_neighbors=5,  # initial neighbours in a WS network
                 RG_init_degree=5,  # initial degree in a regular network
                 order_prob=None,  # order probability dict (optional)
                 order_BF=3,  # number of daily order peaks
                 npc_num=0
                 ):
        """Build the simulation: riders, NPCs, users, social network, platform and log files."""
        super().__init__()
        print("rider_len{}".format(rider_len))
        with open('data.csv', 'w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(['Runner Step', 'Agent ID', 'Position Length', 'region'])
        # Remember the full argument list so reset() can rebuild the model in place.
        self.params_init = [run_time, comm, websocket, rider_len, sleep_time, walk_time, intervention_strategy, percentage, learning_types,
                            array_sex, array_diligent, regulation_type, network_type, prob, reposition_type, BA_add_edges, WS_init_neighbors, RG_init_degree,
                            order_prob, order_BF, npc_num]
        self.model_run_time = run_time
        self.percentage = percentage
        self.intervention_strategy = intervention_strategy
        self.governance = Governance(intervention_strategy)
        if array_sex is None:
            array_sex = [0 for _ in range(rider_len)]
        if array_diligent is None:
            array_diligent = [1 for _ in range(rider_len)]
        if order_prob is None:
            order_prob = {
                "A": 0.003,
                "B": 0.03,
                "C": 0.003,
                "D": 0.006,
                "E": 0.04
            }
        self.riders = []
        self.merchants = []
        self.high_quality_orders = [[] for _ in range(NUM_REGION)]
        self.normal_orders = [[] for _ in range(NUM_REGION)]
        # simulated clock
        self.time_model = {"day": 0, "hour": 0, "min": 0}
        self.runner_step = 0  # time-step counter
        self.order_id = [0]
        self.idle_rider = list()
        self.idle_order = list()
        # repast4py context, rank and shared grid
        self.context = ctx.SharedContext(comm)
        self.rank = comm.Get_rank()
        box = space.BoundingBox(0, 1000, 0, 1000, 0, 0)  # world bounds
        self.grid = space.SharedGrid(name="grid", bounds=box, borders=space.BorderType.Sticky,
                                     occupancy=space.OccupancyType.Multiple, buffer_size=2, comm=comm)
        self.context.add_projection(self.grid)

        self.reposition_type = reposition_type  # 0 = anomaly detection only, 1 = detection + re-dispatch

        # system-regulation bookkeeping
        self.all_money = 0  # total income over all riders
        self.is_reposition = 0  # 1 once a reposition has been applied, else 0
        self.is_anomalies = 0  # 1 if an anomaly was detected, else 0
        self.outlier_info = []  # collected anomaly flags

        # schedule / frontend plumbing
        self.websocket = websocket
        self.walk_features = []
        self.outlier_walker = []
        self.model_int = None
        self.model_r = None
        self.show_data = dict()
        self.npc_num = npc_num
        # region geometry
        self.distances_region = np.array(distance_matrix())
        # order generation parameters
        self.order_prob = order_prob
        self.order_BF = order_BF

        # anomaly-detection data collection
        self.regulation_type = regulation_type
        self.region_data = np.zeros((NUM_REGION * 4))  # per-step region snapshot
        self.regions_info = None
        self.region_data_all = []  # full history of snapshots
        self.regulation_data = dict(algorithm=[], idx=[], name=[], value=[], params=[])

        # rider / merchant agent creation
        self.eat_list = read_map.get_door("饭店")
        # evolutionary learning
        self.evolut_id = []  # ids of riders whose chromosomes were refreshed by evolution
        # rainy-region record
        self.rain_region = []
        # logging state
        self.info_index = ''
        self.pd_model = None

        self.riders_list = {}  # id -> Rider
        rider_id_move = []
        idle_riders_info = []
        print(rider_len, learning_types, "LLLLL")
        for i in range(npc_num):
            npc = Npc(i, self.rank, self, random.choice(all_door_list)[0])
            self.context.add(npc)
            self.grid.move(npc, npc.pt)
        for i in range(rider_len):
            w1, w2, w3 = generate_random_numbers()
            rider = Rider(i, self.rank, 3, self, learning_types[i], walk_time[i], sleep_time[i], array_sex[i],
                          array_diligent[i], "10步/step", [],
                          random.choice(home_list)[0], True, w1, w2, w3, random.uniform(0.2, 0.5))
            idle_riders_info.append({"id": rider.id,
                                     "region": rider.work_region,
                                     "income": rider.money,
                                     "cost": rider.labor,
                                     "position": rider.location})
            self.riders_list[rider.id] = rider
            self.context.add(rider)
            self.grid.move(rider, rider.home)
            rider_id_move.append(str(i))
        self.show_data["agents"] = idle_riders_info

        self.dispatch_mode = "TSFH"
        self.dispatch_params = dict(idx=list(self.riders_list))

        self.reposition_mode = "Use Outlier"
        self.reposition_params = dict(idx=list(self.riders_list))

        # causal-analysis overrides: key is a rider id; value is
        # [accept-order action, region-choice action]
        self.control_actions = {2: [1, 3]}
        self.control_id = 2
        # build the social network between riders
        prob = prob / rider_len if rider_len != 0 else 0
        self.network_type = network_type
        if network_type == 'ER':  # Erdos-Renyi random graph
            self.G = nx.erdos_renyi_graph(n=rider_len, p=prob)
        elif network_type == 'BA':  # scale-free graph
            self.G = nx.barabasi_albert_graph(n=rider_len, m=BA_add_edges)
        elif network_type == 'WS':  # small-world graph
            self.G = nx.watts_strogatz_graph(n=rider_len, k=WS_init_neighbors, p=prob)
        elif network_type == 'RG':  # regular graph
            self.G = nx.random_regular_graph(n=rider_len, d=RG_init_degree)
        else:
            self.G = nx.random_regular_graph(n=rider_len, d=0)

        # seed each rider's neighbour list from the graph
        for node in self.G.nodes:
            for neighbor in self.G.neighbors(node):
                self.riders_list[node].network.append(self.riders_list[neighbor])

        for index, user_point in enumerate(user_list_random):
            user = User(dpt(user_point[0][0], user_point[0][1]), index, self.rank)
            self.context.add(user)
            self.grid.move(user, user.location)

        self.evolut_num = 0
        for rider in self.context.agents():
            if rider.TYPE == Rider.TYPE:
                if rider.learning_type == 10:
                    self.evolut_num += 1
        # emergence bookkeeping
        self.last_six_steps = []
        self.degree_distributions = {}
        # directed interaction graph built up during the run
        self.G1 = nx.DiGraph()
        # create the network-analysis CSV and write its header
        self.csv_file = 'network_analysis_results.csv'
        with open(self.csv_file, 'w') as f:
            f.write("Step,Network Density,Average Path Length (SCC),Most Influential Riders,Number of Connected Components,Size of Largest Component\n")

        # utility tracking
        self.calUtility = calUtility()
        # log files
        script_dir = os.path.dirname(os.path.abspath(__file__))
        folder = "order_log"
        folder_path = os.path.join(script_dir, folder)
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        path = "order.csv"
        filepath_agent = os.path.join(folder_path, path)
        # mode 'w' already truncates; the old seek/truncate pair was redundant
        with open(filepath_agent, mode='w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(["x", "y", "time_segment", "rider_status", "money", "order_density"])
        self.info_log()
        move_path = os.path.join(script_dir, 'Storage/move_log')
        if not os.path.exists(move_path):
            # BUG FIX: previously created folder_path (which already exists,
            # raising FileExistsError) and left move_path missing, so the
            # open() below failed. Create the actual target directory.
            os.makedirs(move_path)
        path_move = f"move_{self.model_run_time}.csv"
        filepath_move = os.path.join(move_path, path_move)
        with open(filepath_move, mode='w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(rider_id_move)
        # rider movement coordinate log (only meaningful when there are riders)
        if rider_len > 0:
            self.move_csv = pd.read_csv(filepath_move)

        self.meituan = Platform(self.order_prob, self.order_BF, 0.2, self)
    
    def reset(self, return_type=0):
        """Re-run __init__ with the stored parameters and return a zeroed observation.

        return_type selects the observation granularity: 0 = micro (the
        controlled rider's info plus four zero slots), 1 = macro (12 zeros),
        anything else = causal (47 zeros).
        """
        self.__init__(*self.params_init)
        if return_type == 0:
            return self.riders_list[self.control_id].return_info + [0.0, 0.0, 0.0, 0.0]
        if return_type == 1:
            return [0.0] * 12
        return [0.0] * 47

    # Log each rider's movement trace into the move CSV frame
    def move_info_log(self):
        """Write each rider's accumulated path into its matching column of the move frame."""
        for rider in self.context.agents(Rider.TYPE):
            matches = self.move_csv.columns[self.move_csv.columns.str.contains(str(rider.id))]
            self.move_csv[matches[0]] = pd.Series(rider.done_position)

    def display_network(self):
        """Redraw the rider social network for the current tick (interactive mode)."""
        plt.clf()  # clear the previous frame
        nx.draw(self.G, with_labels=True)  # draw the current graph
        # BUG FIX: the original used self.step, which is this class's step()
        # method object (the title showed a bound-method repr); runner_step is
        # the actual tick counter.
        plt.title(f'Step {self.runner_step}')
        plt.pause(0.1)  # brief pause (seconds) so the figure refreshes

    def update_networks(self):  # rewire the rider network
        """Add one random edge per node and register the new neighbour on the rider.

        NOTE(review): assumes every graph node carries a 'rider' attribute
        (self.G.nodes[n]['rider']); __init__ does not set it here — confirm it
        is attached elsewhere before this is called. neighbor_node may equal
        node, so self-loops are possible. `neighbors` is computed but currently
        unused because the alternative branch below is commented out.
        """
        for index, node in enumerate(self.G.nodes()):
            neighbors = [neighbor for neighbor in self.G.neighbors(node) if neighbor != node]
            # if not neighbors:
            neighbor_node = random.choice(list(self.G.nodes()))
            rider_neighbor = self.G.nodes[neighbor_node]['rider']
            self.G.add_edge(node, neighbor_node)
            self.G.nodes[node]['rider'].init_riders_network([self.riders_list[rider_neighbor.id]])
            # else:
            #     neighbor_node = random.choice(neighbors)
            #     rider_neighbor = self.G.nodes[neighbor_node]['rider']
            #     print('neighbor', neighbor_node, node)
            #     self.G.remove_edge(node, neighbor_node)
            #     self.G.nodes[node]['rider'].remove_riders_network([self.riders_list[rider_neighbor.id]])


    def info_log(self):
        #  每一次vi模型运行时候记录信息，包括模型信息，棋手信息
        all_items = os.listdir('./info_log')
        folder_nums = [int(name) for name in all_items if os.path.isdir(os.path.join('./info_log', name))]
        ans = max(folder_nums) if folder_nums else -1
        if self.reposition_type == 0:
          new_folder_path = os.path.join('./info_log', str(self.model_run_time + list(self.riders_list.values())[0].learning_type))
        else:
          new_folder_path = os.path.join('./info_log', str(self.model_run_time + 1 + list(self.riders_list.values())[0].learning_type))
        # if os.path.exists(new_folder_path):
        #     shutil.rmtree(new_folder_path)
        # os.makedirs(new_folder_path)
        self.info_index = new_folder_path
        csv_file_path = os.path.join(new_folder_path, 'utility.csv')
        with open(csv_file_path, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['system_utility', 'entropy', 'productivity', 'third', 'all_money'])

        csv_file_path2 = os.path.join(new_folder_path, "agent_utility.csv")
        with open(csv_file_path2, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([str(i) for i in range(len(self.riders_list))])

        csv_file_path3 = os.path.join(new_folder_path, "agent_move.csv")
        data_move = [[int(0) for _ in range(25)] for _ in range(25)]
        with open(csv_file_path3, 'w', newline="") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerows(data_move)
        self.pd_model = data_move

        csv_file_path4 = os.path.join(new_folder_path, "agent_money.csv")
        data_money = [0 for _ in range(len(self.riders_list))]
        with open(csv_file_path4, "w", newline="") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(data_money)

        csv_file_path_causal = os.path.join(new_folder_path, 'causal_info.csv')
        with open(csv_file_path_causal, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(["agent_id", "time_step", "agent_utility", "avg_utility", "system_utility", "act_rules",
                             "cooperation", "topology", "agent_skill"])

        csv_file_path_outlier = os.path.join(new_folder_path, 'outlier.csv')
        with open(csv_file_path_outlier, 'w', newline='') as csvfile:
            writer = csv.writer(csvfile)
            regions = [f'region{i}' for i in range(1, 11)]
            writer.writerow(regions)

    # Fill the experiment report (goal, settings, rider/merchant setup, charts) and save it as an Excel file and image
    def fill_report(self):
        """Fill the experiment-report template, embed charts, and save it.

        Writes the experiment goal and settings into the template workbook,
        embeds the four result charts, saves Storage/test.xlsx, and finally
        tries to export the sheet as a PNG. The export needs Spire.XLS (its
        imports are commented out at the top of this file), so it is guarded.
        """
        wb = openpyxl.load_workbook("Storage/实验报告模板.xlsx")
        ws = wb.active
        # experiment goal
        experiment_target = "探究不同智能程度下骑手效能以及系统效能规律"
        ws.title = experiment_target
        ws.cell(2, 1, experiment_target)

        # experiment settings
        rider_num = 400
        merchant_num = 21
        time_segment = "2分钟/step"
        episode_num = 1000
        regular_num = 100
        imitation_num = 100
        rl_num = 100
        LLM_num = 100
        ws.cell(5, 3, rider_num)
        ws.cell(6, 3, merchant_num)
        ws.cell(7, 3, time_segment)
        ws.cell(8, 3, episode_num)
        ws.cell(9, 3, regular_num)
        ws.cell(10, 3, imitation_num)
        ws.cell(11, 3, rl_num)
        ws.cell(12, 3, LLM_num)

        # rider settings
        rider_speed = "10步/step"
        working_time = "8:00-15:00"
        max_time = "8小时"
        max_carry = 3
        view_range = 10
        district_choice = "商区1，商区2，\n商区3，商区4"
        ws.cell(5, 7, rider_speed)
        ws.cell(6, 7, working_time)
        ws.cell(7, 7, max_time)
        ws.cell(8, 7, max_carry)
        ws.cell(9, 7, view_range)
        ws.cell(10, 7, district_choice)

        # merchant settings
        order_regular = "7:00-10:00早高峰，\n11:00-13:00午高峰，\n17:00-20:00晚高峰"
        order_refresh = "5step刷新一次"
        order_past = "10step"
        ws.cell(5, 11, order_regular)
        ws.cell(8, 11, order_refresh)
        ws.cell(11, 11, order_past)

        # chart images
        plt.plot([1, 2, 3, 4], [1, 4, 9, 16])
        plt.title('Sample Plot')
        img = Image('Storage/rider_money.png')
        height = img.height * 0.75
        width = img.width * 0.75
        # NOTE(review): insert_image treats image_size as (max_width, fixed_height)
        # but (height, width) is passed here — confirm the intended order.
        insert_image(ws, 15, 4, 14, "Storage/rider_money.png", (height, width))
        insert_image(ws, 15, 10, 14, "Storage/rider_efficiency.png", (height, width))
        insert_image(ws, 30, 4, 14, "Storage/merchant_money.png", (height, width))
        insert_image(ws, 30, 10, 14, "Storage/merchant_efficiency.png", (height, width))

        plt.close()

        wb.save("Storage/test.xlsx")
        # BUG FIX: the sheet-to-PNG export below uses the Spire.XLS API
        # (LoadFromFile / Worksheets.get_Item / ToImage), whose imports are
        # commented out at the top of this file. openpyxl's Workbook has none
        # of these methods, so the original code always raised AttributeError
        # here. Guard it so report generation still completes when Spire.XLS
        # is unavailable.
        try:
            workbook = openpyxl.Workbook()
            workbook.LoadFromFile("Storage/test.xlsx")  # Spire.XLS API, not openpyxl
            sheet = workbook.Worksheets.get_Item(0)
            # render the worksheet as an image
            image = sheet.ToImage(sheet.FirstRow, sheet.FirstColumn, sheet.LastRow, sheet.LastColumn)
            # save the rendered image as a PNG file
            image.save("Storage/ans.png", format="PNG")
        except AttributeError as e:
            logger.error("worksheet-to-PNG export requires Spire.XLS: %s", e)

    # Append system utility, entropy, productivity and related data to utility.csv.
    def write_u(self, x, y, z, w, u):
        csv_path = os.path.join(self.info_index, 'utility.csv')
        with open(csv_path, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([x, y, z, w, u])

    # Append per-rider utility data to agent_utility.csv.
    def agent_u(self, data: dict):
        csv_path = os.path.join(self.info_index, 'agent_utility.csv')
        with open(csv_path, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(data.values())
    
    def outlier_u(self):
        """Append the latest per-region outlier flags to outlier.csv and refresh the heatmap image."""
        outlier_csv = os.path.join(self.info_index, 'outlier.csv')
        with open(outlier_csv, 'a', newline='') as out:
            csv.writer(out).writerow(self.outlier_info)
        self.plot_heatmap_from_csv(self.info_index + '/outlier.csv', './outiler.png')

    # Append order info to order_log/order.csv.
    def order_log(self, id, pickup, delivery, money):
        """Append one order record (id, endpoints, payout, A* route length) to order_log/order.csv."""
        planner = AStarPlanner(pickup[0], pickup[1], delivery[0], delivery[1])
        route_len = len(planner.planning()[0])
        with open("order_log/order.csv", mode='a', newline='') as out:
            csv.writer(out).writerow([id, pickup, delivery, money, route_len])

    # Update the display payload (riders, orders, regions) and log system/rider utility data.
    # regions_info: region summaries.
    # transfer_move_data: rider movement data.
    # rider_show_data: rider display data.
    # rider_order_info: rider order info.
    # rider_money: rider income.
    # order_num: order count.
    # nums_id: id grouping data.
    def update_show_infor(self, regions_info, transfer_move_data, rider_show_data, rider_order_info, rider_money,
                          order_num, nums_id, agents_info):
        """Refresh self.show_data for the frontend and log system/agent utilities.

        NOTE(review): assumes self.riders is the per-region list-of-lists that
        step() rebuilds each tick (right after __init__ it is still a flat
        list, so calling this before step() would fail on len(sublist)).
        """
        num_riders = sum(len(sublist) for sublist in self.riders)
        num_orders = sum(len(sublist) for sublist in self.meituan.normal_orders)
        self.idle_rider.append(num_riders)
        self.idle_order.append(num_orders)

        # number of idle orders in each region
        self.region_num = [{"name": "block " + str(i), "value": len(self.meituan.normal_orders[i])} for i in
                           range(len(self.meituan.normal_orders))]

        # self.idle_riders_info = [ { "id":d.id,
        #                             "region": d.work_region,
        #                             "income": d.money} for sublist in self.riders for d in sublist]

        idle_riders_info = list()
        now_agent_matrix = dict()
        self.all_money = 0
        
        for agent in self.context.agents():
            if agent.TYPE == Rider.TYPE:
                self.all_money += agent.money
                idle_riders_info.append({"id": agent.id,
                                         "region": agent.work_region,
                                         "income": agent.money,
                                         "cost": agent.labor,
                                         "position": agent.location})
                # income / labor deltas since the previous snapshot
                now_agent_matrix[str(agent.id)] = [agent.money - agent.pre_money, agent.labor - agent.pre_labor]
                agent.pre_money = agent.money
                agent.pre_labor = agent.labor
        print("idle rider !!!!!!!!!", idle_riders_info)
        print("idle order !!!!!!!!!", self.region_num)
        if len(self.riders_list) > 0:
            # runner_step - 1 indexes the utilities of the step that just finished
            self.calUtility.update_agent_matrix(now_agent_matrix, self.runner_step - 1)
            self.calUtility.update_utility(self.runner_step - 1)
            self.write_u(self.calUtility.system_utility_list["system_utility"][self.runner_step - 1],
                         self.calUtility.system_utility_list["entropy"][self.runner_step - 1],
                         self.calUtility.system_utility_list["productivity"][self.runner_step - 1],
                         self.calUtility.avg_utility_list[self.runner_step - 1],
                         self.all_money)
            self.agent_u(self.calUtility.agent_utility_matrix)
        # print("move", transfer_move_data)
        self.outlier_u()
        self.show_data = {
            "run_time": self.model_run_time,
            "tick": self.time_model,
            "regions_info": regions_info,
            "move": transfer_move_data,
            "id_regions": nums_id,
            "sumDataInfor": [self.idle_rider, self.idle_order],
            "region_count": self.region_num,
            "regulate_riders": idle_riders_info,
            "utility": [self.calUtility.system_utility_list, self.calUtility.agent_utility_matrix,
                        self.calUtility.avg_utility_list],
            "rider_show": rider_show_data,
            "rider_order": rider_order_info,
            "rider_money": rider_money,
            "agents": idle_riders_info,
            "order_num": order_num,
            "agents_info": agents_info}


    # Collect info about the flagged anomalous regions.
    def collect_anormal_region(self, outliers, regions_data):
        """Build a summary dict for every region flagged in the latest outlier row.

        Regions with id >= 4 are restaurant areas, the rest company areas.
        Each entry reports busy/idle rider counts and the idle order count
        pulled from the flattened region vector (4 slots per region).
        """
        flagged = []
        latest = outliers[-1]
        for region_id in range(NUM_REGION):
            if latest[region_id] != 1:
                continue
            flagged.append(dict(
                id=region_id,
                type="饭店" if region_id >= 4 else "公司",
                numIdleR=regions_data[region_id * 4 + 1],
                numbusyR=regions_data[region_id * 4],
                numIdleO=regions_data[region_id * 4 + 2]
            ))
        return flagged

    # Run the evolutionary-learning algorithm over the given riders.
    def evolution_learn(self, riders, learners_num, time_model):
        """Run one round of evolutionary learning over *riders*.

        Collects every rider's chromosome and fitness into an
        EvolutionaryLearning group, breeds descendants, then pushes the queued
        replacement chromosomes onto the riders whose ids did not survive into
        the new group. Aborts without changes when any rider reports fitness -1.
        """
        learner = EvolutionaryLearning(learners_num)
        for rider in riders:
            if rider.now_fitness(time_model) == -1:
                return
            learner.add_to_group(rider.id, rider.get_chromosome(), rider.now_fitness(time_model))
        learner.reproduce_descendant(('roulette_wheel_selection', learners_num - 1),
                                     ('single_point_crossover', None))
        surviving_ids = {member.id for member in learner.group}
        replacements = [rider for rider in riders if rider.id not in surviving_ids]
        for rider in replacements:
            if not learner.update_info:
                break
            rider.update_chromosome(learner.update_info.pop(0)[1])

    def actions_change(self): # 因果分析可以调用这个函数，改变骑手的行为
        for key, nums in self.control_actions.items():
            self.riders_list[key].actions = nums

    def step(self, action=-1, action_region=-1, return_info_type=0, control_id=0, reposition_type=0): # action -1: 骑手自己决策，1是默认接单，0是拒绝订单； action_region：-1是骑手自主决策，0-4是4个商区；return_info_type：0返回微观，1返回宏观; control_id: 骑手的id; 2是返回因果数据
        # print('edges', self.G.edges())
        self.reposition_type = reposition_type
        self.control_id = control_id
        self.control_actions = {self.control_id : [action, action_region]}
        # print('self.control_actions',self.control_actions)
        self.actions_change() # 因果分析控制骑手的行动
        # 记录最终结果
        transfer_move_data = []  # 骑手的移动信息
        rider_show_data = []  # 旗手 当前工作状态
        rider_order_info = []  # 当前派送订单
        rider_money = []  # 棋手的金钱
        rider_order_num = []  # 棋手的订单数量
        # npc移动
        # 检查是否有类型为Npc.TYPE的代理存在
        if Npc.TYPE in self.context._agents_by_type:
            for npc in self.context.agents(Npc.TYPE):
                npc.step()
                transfer_move_data.append(npc.move_positions)
                npc.move_positions = []
        # 更新时间步
        self.runner_step += 1
        self.time_model["day"] = int(self.runner_step / ONE_DAY)
        self.time_model["hour"] = int(self.runner_step % ONE_DAY / ONE_HOUR)
        self.time_model["min"] = int(self.runner_step) % ONE_DAY % ONE_HOUR * ONE_MIN
        self.riders = [[] for _ in range(NUM_REGION)]  # 不同区域的骑手
        riders_all = []  # 所有区域的骑手
        self.region_data = np.zeros((NUM_REGION * 4))

        # 分类骑手
        for agent in self.context.agents():

            if agent.TYPE == Rider.TYPE:

                riders_all.append(agent)
                if agent.order_count < agent.max_orders and (agent.status == 2 or agent.status == 3):
                    self.riders[agent.work_region].append(agent)
                    # print('self.riders', self.riders)
        print('runner_step',self.runner_step)
        self.meituan.step(self.runner_step)

        regions_id = [[] for _ in range(4)]  # 记录小地图对应的编号 # 用于记录每个区域的骑手ID
        agents_info = [] #记录每一个agent信息
        id_info = {}  # 需要返回的因果分析的信息
        collect_agents_info = [0, 0, 0, 0] # 收入 ， 休息时间， 劳动成本， 最多获得订单数量
        every_region = [] # 暂时存储选择的区域
        for agent in self.context.agents():
            if agent.TYPE == Rider.TYPE:
                # 让骑手执行他们的 `step` 方法
                # # 返回的信息包括： [位置（dpt形式）, 开始工作时间, 休息时间, "如果没有订单之后的信息是-1"订单id, pickup_location, delivery_location, 订单类型(0表示的是普通订单，1表示高级订单)] 
                id_info[agent.id] = agent.step(self.governance.intervention_strategy, self.percentage, self.time_model, self.grid,
                                 self.meituan.normal_orders, riders_all, self.order_id, self.runner_step, self.eat_list)
                collect_agents_info = [x + y for x, y in zip(collect_agents_info, [agent.money, agent.sleep_time - agent.walk_time, agent.labor, agent.max_orders])]
                every_region.append(agent.work_region)
                num_id = 0 if agent.pt.x < REGION_LINE else 1
                regions_id[num_id].append(agent.id)
                self.log_info_rl(agent)
                agents_info.append(agent.now_info_send())
                # 更新记录info
                row_index = int(agent.pt.x / 20)
                row_index_2 = int(agent.pt.y / 20)
                col_name = row_index_2

                # 计算骑手的位置索引，用于更新位置模型 pd_model
                self.pd_model[row_index][col_name] = str(int(self.pd_model[row_index][col_name]) + 1)
                with open(self.info_index + '/agent_move.csv', "w", newline="") as csvfile:
                    csvwriter = csv.writer(csvfile)
                    csvwriter.writerows(self.pd_model)

                self.update_region_data_rider(agent)  # 更新骑手信息到region矩阵里面

                transfer_move_data.append(agent.update_position)

                # 将骑手的移动信息、工作状态、订单信息和收入添加到相应的列表中
                rider_show_data.append(agent.work_status(self.runner_step))
                rider_info_tmp, rider_money_tmp, rider_order_tmp = agent.order_message(self.runner_step)
                rider_order_info.append(rider_info_tmp)
                rider_money.append(int(rider_money_tmp))
                rider_order_num.append(rider_order_tmp)
        collect_agents_info = [x / len(self.riders_list) for x in collect_agents_info]# 平均

        # 获得最多出现的区域
        # print('self.control_actions',self.control_actions)
        counter = Counter(every_region)
        most_region = counter.most_common(1)[0][0]
        collect_agents_info.append(most_region)

        # 更新订单相关区域信息
        self.region_info_update_order()
        # 在所有信息更新完后进行异常检测，以便进行骑手的重定位
        # if self.regulation_type is not None:
        #     region_obser = self.generate_region_info()
        #     # 调用 anormal_region_detection 方法进行异常检测
        #     outlier, self.model_r, self.model_int = anormal_region_detection(region_obser, self.model_r, self.model_int)
        #     # 更新区域信息 regions_info
        #     self.regions_info = self.collect_anormal_region(outlier, self.region_data)
        #     # 调用 handle_regulation 方法处理异常
        #     self.handle_regulation()


        # 调用 reposition 方法重新定位骑手
        region_obser = self.generate_region_info()
        # # 进行重调度
        if self.reposition_type :
          self.meituan.reposition(region_obser)
        # 不进行重调度的异常检测
        else:
          self.meituan.reposition_without(region_obser)
        # 调用 update_show_infor 方法更新显示信息
        self.update_show_infor(self.regions_info, transfer_move_data, rider_show_data, rider_order_info, rider_money,
                               self.meituan.order_num, regions_id, agents_info)

        for key, nums in self.control_actions.items():  # 增加效能信息
            id_info[key].append(self.calUtility.agent_utility_matrix[str(key)])
            agent_info = id_info[key]


        # 当前step所有骑手名称
        riders = ['Rider A', 'Rider B', 'Rider C']
        # 当前step所有订单名称
        orders = ['Order 1', 'Order 2', 'Order 3', 'Order 4', 'Order 5']
        # 当前step的所有交互关系，举例（包括骑手接单、骑手拒单、骑手协作）
        interactions = [('Rider A', 'Order 1', 2, 'accepted'), ('Rider C', 'Order 5', 2, 'rejected'), ('Rider A', 'Rider B', 1, 'collaboration')]
        
        # 更新图数据interactions
        update_graph_data(self.G1, riders, orders, interactions)
        # 更新分析数据并保存图的状态
        self.last_six_steps, self.degree_distributions = update_analysis(self.G1, self.runner_step, self.csv_file,  self.last_six_steps, self.degree_distributions)
        # 仅绘制并保存最后6步的图
        for i, graph in enumerate(self.last_six_steps):
            draw_network_graph(graph, len(self.last_six_steps) - 6 + i)

        # 保存度分布变化图
        save_degree_distribution_plot(self.degree_distributions)

        # 政府计算公平性
        riders = []
        for rids in self.riders:
          for r in rids:
            riders.append(r)
        print("len ", (len(riders)))
        fairness = self.governance.cal_fairness(self.riders_list.values())
        print("公平性：{} !!!!!!!!!!!!!".format(fairness))
        if self.runner_step % 10 == 0 and len(self.riders_list) > 0:  # 记录移动信息
            # 每10步记录一次移动信息。
            self.move_info_log()
            script_dir = os.path.dirname(os.path.abspath(__file__))
            move_path = os.path.join(script_dir, 'Storage/move_log')
            path_move = f"move_{self.model_run_time}.csv"
            filepath_move = os.path.join(move_path, path_move)
            self.move_csv.to_csv(filepath_move, index=False)
        platform_info = [self.calUtility.system_utility_list["system_utility"][self.runner_step - 1],
                         self.calUtility.system_utility_list["entropy"][self.runner_step - 1],
                         self.calUtility.system_utility_list["productivity"][self.runner_step - 1],
                         self.calUtility.avg_utility_list[self.runner_step - 1]] + collect_agents_info + [self.riders_list[self.control_id].w1, self.riders_list[self.control_id].w2, self.riders_list[self.control_id].w3]
        # 信息分别是：系统效能，价值熵，生产力， 平均效能，平均金钱， 平均休息时间， 平均劳动成本， 平均最多获得订单数量， 最多选择区域
        if return_info_type == 0:  # 微观
            return id_info[self.control_id]  + [self.riders_list[self.control_id].w1, self.riders_list[self.control_id].w2, self.riders_list[self.control_id].w3] #[位置（dpt形式）, 开始工作时间, 休息时间, "如果没有订单之后的信息是None"订单id, pickup_location, delivery_location, 订单类型(0表示的是普通订单，1表示高级订单), 价值熵] 
        elif return_info_type == 1:
            return platform_info # 宏观
        else:
            print('长度', len(self.region_data))
            print(self.is_reposition, 
                    self.is_anomalies, 
                    self.calUtility.system_utility_list["system_utility"][self.runner_step - 1],
                    self.calUtility.system_utility_list["entropy"][self.runner_step - 1],
                    self.calUtility.system_utility_list["productivity"][self.runner_step - 1],
                    self.calUtility.avg_utility_list[self.runner_step - 1],
                    self.all_money , self.region_data)
            return [self.is_reposition, 
                    self.is_anomalies, 
                    self.calUtility.system_utility_list["system_utility"][self.runner_step - 1],
                    self.calUtility.system_utility_list["entropy"][self.runner_step - 1],
                    self.calUtility.system_utility_list["productivity"][self.runner_step - 1],
                    self.calUtility.avg_utility_list[self.runner_step - 1],
                    self.all_money] + self.region_data.tolist()

  

    
    # Fold one rider into the per-region counters of self.region_data.
    def update_region_data_rider(self, agent: Rider):
        """Count this rider as busy or idle in its work region's slot group.

        region_data uses a stride-4 layout per region:
        slot 0 = busy riders, slot 1 = idle riders (slots 2/3 are order stats).
        """
        base = agent.work_region * 4
        busy = agent.order_count > 0
        self.region_data[base if busy else base + 1] += 1

    # Build the observation dict consumed by the reposition / anomaly-detection step.
    def generate_region_info(self):
        """Assemble the real-time regional observation as a dict.

        Appends the current region_data snapshot to the running history and
        packages history, inter-region distances, and time bounds together.
        """
        self.region_data_all.append(self.region_data)
        return {
            "data": np.array(self.region_data_all),
            "distance": self.distances_region,
            "number_region": NUM_REGION,
            "number_source": 4,  # counters tracked per region (busy, idle, orders, money)
            "detect_start": 0,
            "startTime": self.runner_step,
            "endTime": len(self.region_data_all),
        }

    # 生成调度信息
    # def generate_dispatch_info(self, rids):
    #     dispatch_observ = list()
    #     num = 0
    #     for rid in rids:
    #         d = self.context.agent((rid, Rider.TYPE, self.rank))
    #         I = 0
    #         if d.status == 2 or d.status == 3:
    #             now_pt = d.pt
    #             orders = self.meituan.normal_orders[d.]:  # 改为只为普通订单调度
    #                 for o in orders:
                        # a_star_walk = AStarPlanner(now_pt.x, now_pt.y, o.pickup_location[0], o.pickup_location[1])
                        # walks1 = a_star_walk.planning()
                        # a_star_walk = AStarPlanner(o.pickup_location[0], o.pickup_location[1], o.delivery_location[0],
                        #                            o.delivery_location[1])
                        # walks2 = a_star_walk.planning()
                        # if d.location == o.pickup_location:
                        #     distance = 0
                        # else:
                        # distance = len(self.env.grid.paths[str((d.position.x, d.position.y))][str((o.source.x, o.source.y))]) - 1
                        # distance = self.env.grid.plan_path((d.position.x, d.position.y), (o.source.x, o.source.y))[1] -1

        #                 dispatch = {"order_id": o.id,
        #                             "driver_id": d.id,
        #                             "driver_location": d.location,
        #                             "driver_energy": d.money,
        #                             "driver_labor": d.labor,
        #                             # "order_driver_distance": len(walks1),
        #                             "order_start_location": o.pickup_location,
        #                             "order_finish_location": o.delivery_location,
        #                             # "order_distance": len(walks2),
        #                             "timestamp": self.runner_step,
        #                             # "order_finish_timestamp": self.runner_step + math.ceil(
        #                             #     (len(walks1) + len(walks2)) / 10),
        #                             "reward_units": o.money}
        #                 dispatch_observ.append(dispatch)
        # return dispatch_observ

    # Refresh order ids and fold unprocessed orders into the region matrix.
    def refresh_order(self):
        """Re-number each region's orders sequentially and accumulate every
        unprocessed order's count (slot 2) and payout (slot 3) into
        self.region_data.

        NOTE(review): the counters are incremented without a reset here, so
        repeated calls accumulate on top of previous values — confirm the
        caller zeroes region_data first.
        Fix: replaced the manual index counter with enumerate, and removed a
        1-second blocking time.sleep(1) that stalled every simulation step
        with no visible synchronization depending on it.
        """
        for orders in self.meituan.normal_orders:
            for i, order in enumerate(orders):
                order.id = i
                if order.status == "unprocessed":
                    region = order.region
                    self.region_data[region * 4 + 2] += 1            # open-order count
                    self.region_data[region * 4 + 3] += order.money  # open-order value

    def region_info_update_order(self):
        """Add every unprocessed order's count and payout to its region's
        slots in self.region_data (slots 2 and 3 of each region's stride-4
        group).

        Fix: replaced the `for i in range(len(...))` index loop with direct
        iteration over the per-region order lists; dropped dead commented code.
        """
        for orders in self.meituan.normal_orders:
            for order in orders:
                if order.status == "unprocessed":
                    region = order.region
                    self.region_data[region * 4 + 2] += 1            # open-order count
                    self.region_data[region * 4 + 3] += order.money  # open-order value

    def order_update(self, runner_step):
        """Expire and purge orders, then refresh order ids/region stats.

        An unprocessed order past its delete_time is marked 'processed' and
        dropped; orders already 'processed' are dropped as well (payout
        settlement is still unimplemented).

        Fix: the original called list.remove() on the list it was iterating,
        which skips the element following every removal. Rebuilds the kept
        orders instead and assigns them back in place so external references
        to the per-region lists stay valid.
        """
        step = int(runner_step)
        for orders in self.meituan.normal_orders:
            kept = []
            for order in orders:
                if order.status == "unprocessed" and step >= int(order.delete_time):
                    order.status = 'processed'  # expired: mark and drop
                elif order.status == "processed":
                    pass  # already done; money calculation not implemented yet
                else:
                    kept.append(order)
            orders[:] = kept  # in-place so holders of this list see the update

        self.refresh_order()



    def save_regulation_info(self, data, detial):
        """Merge an incoming regulation payload into the platform's regulation table.

        Args:
            data: dict of parallel lists "algorithm", "name", "value"; a
                "params" list is (re)built here, one entry per algorithm.
            detial: list of param dicts, each carrying "name", "start", "end"
                (parameter name is a typo for "detail", kept for compatibility).
        """
        data["params"] = list()

        for i in range(len(data["algorithm"])):
            params = dict()
            # Find the param entry whose regulation window covers the current
            # step; entries outside [start, end] are skipped.
            for j in range(len(detial)):
                if int(detial[j]["end"]) < self.runner_step or int(detial[j]["start"]) > self.runner_step:
                    continue
                if detial[j]["name"] == data["name"][i]:
                    params = detial[j]
                    break

            data["params"].append(params)
            # NOTE(review): membership/index are computed on self.regulation_data
            # but the writes target self.meituan.regulation_data — confirm these
            # reference the same object, otherwise `index` may be wrong.
            if data["algorithm"][i] in self.regulation_data["algorithm"]:
                index = self.regulation_data["algorithm"].index(data["algorithm"][i])
                self.meituan.regulation_data["name"][index] = data["name"][i]
                self.meituan.regulation_data["value"][index] = data["value"][i]
                self.meituan.regulation_data["params"][index] = params
            else:
                self.meituan.regulation_data["name"].append(data["name"][i])
                self.meituan.regulation_data["value"].append(data["value"][i])
                self.meituan.regulation_data["params"].append(params)

            # A "distribution" regulation also updates governance's intervention strategy.
            if data["name"][i] == "distribution":
                # self.intervention_strategy = data["value"]
                self.governance.intervention_strategy =  data["value"]


    def random_region_point(self, region):
        """Return a uniformly random (x, y) inside the bounding box of *region*."""
        low_corner = Region_info[region][0]
        high_corner = Region_info[region][-1]
        return (random.randint(low_corner[0], high_corner[0]),
                random.randint(low_corner[1], high_corner[1]))

    # def handle_regulation(self):
    #     removes = []
    #     for i in range(len(self.regulation_data["algorithm"])):
    #         if self.regulation_data["algorithm"][i] == "dispatch algorithm":
    #             if "start" in self.regulation_data["params"][i] and \
    #                     int(self.regulation_data["params"][i]["start"]) <= self.runner_step and int(
    #                 self.regulation_data["params"][i]["end"]) >= self.runner_step:
    #                 self.dispatch_mode = self.regulation_data["value"][i]
    #                 self.dispatch_params = self.regulation_data["params"][i]
    #             elif "start" not in self.regulation_data["params"][i]:
    #                 self.dispatch_mode = self.regulation_data["value"][i]
    #                 self.dispatch_params = dict(idx=list(self.riders_list.keys()))
    #             else:
    #                 self.dispatch_mode = "TSFH"
    #                 self.dispatch_params = dict(idx=list(self.riders_list.keys()))
    #                 removes.append(i)

    #         if self.regulation_data["algorithm"][i] == "reposition algorithm":
    #             if "start" in self.regulation_data["params"][i] and \
    #                     int(self.regulation_data["params"][i]["start"]) <= self.runner_step and int(
    #                 self.regulation_data["params"][i]["end"]) >= self.runner_step:
    #                 self.reposition_mode = self.regulation_data["value"][i]
    #                 self.reposition_params = self.regulation_data["params"][i]
    #             elif "start" not in self.regulation_data["params"][i]:
    #                 self.reposition_mode = self.regulation_data["value"][i]
    #                 self.reposition_params = dict(idx=list(self.riders_list.keys()))
    #             else:
    #                 self.reposition_mode = "None"
    #                 self.reposition_params = dict(idx=list(self.riders_list.keys()))
    #                 removes.append(i)

    #     for i in removes:
    #         del self.regulation_data["algorithm"][i], self.regulation_data["name"][i], self.regulation_data["value"][i], \
    #         self.regulation_data["params"][i]


    # def reposition(self):
    #     reposition_action = []

    #     rider_infor = []
    #     for idx in self.reposition_params["idx"]:
    #         agent = self.context.agent((int(idx), Rider.TYPE, self.rank))
    #         rider_infor.append({"id": agent.id,
    #                             "status": agent.status,
    #                             "work_region": agent.work_region
    #                             })
    #     if self.reposition_mode == "random":
    #         reposition_action = random_reposition(dict(), rider_infor, read_map.get_door("道路"))
    #     elif self.reposition_mode == "use outlier":
    #         reposition_action = useOutlier(dict(), rider_infor, self.regions_info)

    #     for rep in reposition_action:
    #         agent = self.context.agent((rep["rider_id"], Rider.TYPE, self.rank))
    #         agent.status = 3
    #         agent.route = rep["destination"]

    # # 根据不同的调度模式分配订单给骑手，以优化订单分配和骑手调度
    # def dispatch(self):
    #     solution = {}
    #     dispatch_observ = self.generate_dispatch_info(self.dispatch_params["idx"])
    #     if self.dispatch_mode == "TSFH":
    #         for i in range(len(self.meituan.normal_orders)):
    #             if self.riders[i]: random.shuffle(self.riders[i])
    #             solution.update(two_stage_fast_heuristic(self.meituan.normal_orders[i], self.riders[i], D=100))

    #         return solution

    #         # 这些调度方法和上面返回的格式不一样，记得修改
    #     if self.dispatch_mode == "greedy":
    #         dispatch_action = GreedyAlgorithm(dispatch_observ)
    #     elif self.dispatch_mode == "distance_greedy":
    #         dispatch_action = DistanceGreedyAlgorithm(dispatch_observ)
    #     elif self.moddispatch_modeispatch_modede == "NNP":
    #         dispatch_action = NearestNeighborPriority(dispatch_observ)
    #     elif self.dispatch_mode == "Hungarian":
    #         dispatch_action = HungarianAlgorithm(dispatch_observ)

    # Close the WebSocket connection.
    async def close(self):
        """Shut down the model's websocket link.

        NOTE(review): close() is called without await — fine if self.websocket
        is the synchronous websocket-client API, but if it is an async client
        the returned coroutine is silently dropped; confirm which library
        backs self.websocket.
        """
        self.websocket.close()

    def plot_heatmap_from_csv(self, input_csv_path, output_image_path):
        """Render the CSV at *input_csv_path* as a region/time heatmap image."""
        frame = pd.read_csv(input_csv_path)

        # Mask cells that are exactly zero so only non-zero values are drawn.
        zero_cells = (frame == 0.0)

        plt.figure(figsize=(10, 8))
        sns.heatmap(frame, annot=False, mask=zero_cells, cmap="magma",
                    cbar=False, linewidths=0.5, linecolor='gray')
        plt.title("Heatmap of Regions")
        plt.xlabel("Regions")
        plt.ylabel("Time")
        plt.savefig(output_image_path)
        plt.close()  # release the figure's memory

    def order_density(self, x, y):
        """Count pending orders whose pickup point lies within 250 units of (x, y)."""
        here = (x, y)
        return sum(
            1
            for region_orders in self.meituan.normal_orders
            for order in region_orders
            if points_dis(order.pickup_location, here) <= 250
        )

    def log_info_rl(self, rider: "Rider"):
        """Append one CSV row of rider state plus local order density to
        order_log/order.csv next to this module.

        Columns: x, y, time_segment, rider_status, money, order_density.
        Fixes: removed dead trailing `pass`, dropped an f-string with no
        placeholder, and replaced the racy exists()+makedirs pair with
        os.makedirs(..., exist_ok=True).
        """
        info = rider.info_get(self.runner_step)
        info.append(self.order_density(rider.pt.x, rider.pt.y))
        script_dir = os.path.dirname(os.path.abspath(__file__))
        folder_path = os.path.join(script_dir, "order_log")
        os.makedirs(folder_path, exist_ok=True)  # race-free directory creation
        filepath_agent = os.path.join(folder_path, "order.csv")
        with open(filepath_agent, mode='a', newline='') as file:
            csv.writer(file).writerow(info)


async def run_model(params):
    """Run the model for the first two parameter sets (300 steps each),
    then dump each rider's per-step income to agent_money.csv."""
    for index, param in enumerate(params):
        if index > 1:  # compare at most two configurations
            break
        model = Model(index, *param)
        model.reset(0)
        print('x-x' * 20)
        # Anomaly detection needs 60 steps of training data, so the run must
        # comfortably exceed 60 steps.
        for _ in range(300):
            model.step(1, randint(0, 4), 1)
        income = pd.DataFrame()
        for rider in model.riders_list.values():
            income[rider.id] = rider.step_income
        income.to_csv(os.path.join(model.info_index, "agent_money.csv"))
 
def set_seed(seed):
    """Seed both NumPy's and Python's global RNGs for reproducible runs."""
    np.random.seed(seed)
    random.seed(seed)

if __name__ == "__main__":
    # Seed the RNGs for a reproducible run.
    set_seed(0)

    # Load rider persona definitions. NOTE(review): `members` is not referenced
    # later in this chunk — confirm whether it is still needed.
    with open('memories/riders.json') as f:
        members = json.load(f)
    rider_len = 5
    # Randomized per-rider attributes: walk value, sleep need, sex, diligence level.
    array_walk = [random.randint(0, 10) for _ in range(rider_len)]
    array_sleep = [random.randint(200, 210) for _ in range(rider_len)]
    array_sex = [random.randint(0, 1) for _ in range(rider_len)]
    array_diligent_level = [random.choice([1, 5, 10]) for _ in range(rider_len)]
    # Learning mode per rider (see legend at end of file).
    learning_type = [1 for _ in range(rider_len)]
    # intervention_strategy = random.choice([1,2,3])
    intervention_strategy = 3
    # percentage = random.random()
    percentage = 0.5
    # print(rider_len, array_sleep, array_walk, intervention_strategy, percentage, learning_type, array_sex,
    #       array_diligent_level)


    # prob=0.8,  # 网络连接概率
    #              BA_add_edges=5,  # BA 网络新增边数，默认为 3
    #              WS_init_neighbors=5,  # WS 网络初始邻居数，默认为 3
    #              RG_init_degree=5,  # RG 网络初始度，默认为 3
    
    # params = [[MPI.COMM_WORLD, None, 10, [300, 300, 300, 300, 300 ,300, 300, 300, 300, 300],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [7, 7, 7, 7, 7, 7, 7, 7, 7, 7],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'ER', 0.8, 1], [MPI.COMM_WORLD, None, 10, [300, 300, 300, 300, 300 ,300, 300, 300, 300, 300],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'ER', 0.8, 0], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'BA', 0.8, 0, 2], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'BA', 0.8, 4],[MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'BA', 0.8, 6], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'BA', 0.8, 8],[MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'WS', 0.8, 2, 2], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'WS', 0.8, 2, 4], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'WS', 0.8, 2, 6], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'WS', 0.8, 2, 8], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'RG', 0.8, 2, 2, 2], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'RG', 0.8, 2, 2, 4], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'RG', 0.8, 2, 2, 6], [MPI.COMM_WORLD, None, 10, [200, 200, 200, 200, 200 ,200, 200, 200, 200, 200],
    #               [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 0.5, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
    #               [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'RG', 0.8, 2, 2, 8] ]
    params = [[MPI.COMM_WORLD, None, 10, [300, 300, 300, 300, 300 ,300, 300, 300, 300, 300],
                  [2, 2, 2, 3, 4, 2, 2, 2, 3, 4], 3, 1, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #[2, 2, 2, 2, 2, 2, 2, 2, 2, 2],
                  [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [10, 10, 10, 10, 10, 10, 10, 10, 10, 10], None, 'ER', 1, 0] ]
    # `params` can hold several parameter groups so different configurations can
    # be compared in one run. Each inner list is one Model argument set, covering
    # run_time through the diligence array; the trailing entries are the network
    # parameters (type through RG_init_degree) — unused ones may stay at defaults.
    # Add more groups here to run additional experiments.

    # model.fill_report()
    # Drive the async entry point.
    asyncio.run(run_model(params))
# Learning-mode legend: 5 = rule-based, 10 = evolutionary learning,
# 0 = imitation learning, 1 = LLM, 2 = reinforcement learning, 3 = random walk
