import asyncio
import csv
import math
from typing import Tuple
import repast4py.space as space
from repast4py.core import Agent
from repast4py.space import DiscretePoint as dpt
from repast4py.space import ContinuousSpace
from repast4py.space import ContinuousPoint as cpt
from repast4py import context as ctx
from repast4py import schedule, logging
from mpi4py import MPI

from random import randint, randrange
import pandas as pd
import numpy as np
import json
import copy
import random
import time
import os
import sys
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from MeiTuan.outlier_detection import anormal_region_detection
from MeiTuan.dispatch import two_stage_fast_heuristic
from MeiTuan.cal_utility import calUtility
from L1.read_map import AStarPlanner, ReadMap
from initmodel.initriders import init_riders
from cognitive.rider_plan import plan_time, plan_position, check_money, trick_to_time
from MeiTuan.generateOrders import fitting_dist, orders_list
from L2.learning_module import ImitationLearning, EvolutionaryLearning

from cognitive.rider_transfer_llm import get_new_work_location
from LLM.accept_order_llm import accept_order
from LLM.choose_region_llm import choose_region_llm
from env import (door_list_random, get_which_region, WALK_NUM, ONE_DAY, ONE_HOUR, ONE_MIN, Region_info, REGION_LINE,
                 REGION_WORK, mean_point, points_dis)
from user import User


#  When the model runs, newly created riders must choose a learning type and their active hours.


class Rider(Agent):
    """A delivery-rider agent.

    Riders commute between home and a work region, execute dispatched order
    routes step by step on the grid, earn money under one of several payout
    strategies, and adapt where they work/wander via imitation learning
    (``learning_type == 0``), evolutionary learning (``10``) or LLM-based
    decisions (any other value).
    """

    # Agent type tag used by the repast4py context.
    TYPE = 0

    def __init__(self,
                 r_id: int,
                 rank: int,
                 max_orders,
                 model,
                 learning_type: int,
                 standing_time: int,
                 sleeping_time: int,
                 sex: int,
                 diligent_level: int,
                 home: tuple,
                 ):
        super().__init__(id=r_id, rank=rank, type=Rider.TYPE)
        # Basic rider state: position, routing, capacity.
        self.model = model  # owning model
        self.route = []  # route planned by the dispatch algorithm
        self.order_count = 0  # number of orders currently being handled
        self.max_orders = max_orders  # maximum simultaneous orders
        self.pt = dpt(home[0], home[1], 0)  # current grid position
        self.location = (self.pt.x, self.pt.y)
        self.learning_type = learning_type  # 0: imitation, 10: evolutionary, other: LLM
        self.order_info = []  # orders currently in hand

        # Pending vs. completed walking steps; completed steps can be visualised.
        self.position = []  # steps still to be walked
        self.done_position = []  # steps already walked (candidate for removal later)
        self.update_position = []  # steps walked during the current tick, sent to the frontend
        self.random_position = [False, door_list_random[1: 13][self.id % 10][0],
                                []]  # [wandering?, target point, planned steps] for idle roaming to catch more orders
        self.work_region = get_which_region(self.random_position[1])  # region the rider works in
        self.move_regions = self.regions_work()  # regions currently within reach
        self.walk_time = standing_time  # time the rider gets up and leaves home
        self.sleep_time = sleeping_time  # time the rider goes to sleep

        # Work status: 0 cannot take orders, 1 commuting to work or handling
        # current orders, 2 available for new orders, 3 being relocated to
        # another region (can still take orders while relocating).
        self.status = 0
        self.routes_before = []
        self.money = 0
        self.labor = 0  # labour cost, expressed as distance moved
        self.home = self.pt
        self.count_down = []  # runner steps at which an order will be completed
        self.order_num = 0
        self.time_position = 0  # walking time already planned, in single steps
        self.check = 0

        self.pre_money = 0
        self.pre_labor = 0  # previous labour cost, expressed as distance moved

        self.max_merchant = {"sum": 0}  # delivery counts per destination
        self.least_merchant = {"sum": 0}  # walking distance per destination
        self.one_hour_info = [0]  # first entry: money earned within one hour

        # Attributes used by imitation learning.
        self.feature = [self.random_position[1], self.work_region]
        self.feature_effect = [1]  # effect sign per feature, same order as `feature`; all positive by default
        self.feature_normalize = []  # normalised feature values

        # Attributes used by the LLM.
        self.pickup_merchant = []
        self.positive_reviews_num = 0
        self.negative_reviews_num = 0
        self.total_order = 0
        # Constants for this rider.
        self.sex = sex
        self.diligent_level = diligent_level
        self.llm_const_info = {'id': self.id, 'sex': self.sex, 'diligent_level': self.diligent_level}
        # Attributes used by evolutionary learning (the "chromosome").
        self.dis_len = random.randint(200, 300)
        self.dis_import = random.randint(1, 8)
        self.money_import = 10 - self.dis_import
        self.order_memory = {}  # pickup_location -> [accumulated money, location]
        self.order_memory_Normalization = [0, 0, 0, 0]  # [min money, max money, ...]

        self.network = {}  # riders network: id -> Rider

        # Log files: one CSV of per-step state and one text file of LLM responses,
        # both created next to this source file so logging is CWD-independent.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        folder_path = os.path.join(script_dir, "llm_agent_log")
        os.makedirs(folder_path, exist_ok=True)

        csv_path = os.path.join(folder_path, f"deliver_{self.id}_record_llm.csv")
        # Mode 'w' already truncates, so no explicit seek/truncate is needed.
        with open(csv_path, mode='w', newline='') as file:
            writer = csv.writer(file)
            writer.writerow(["id", "x", "y", "time", "order_cluster", "work_status", "money", "home_x", "home_y",
                             "start_work_time", "end_work_time", "day", "total_order", "sex", "diligent_level",
                             "learn_type"])

        txt_path = os.path.join(folder_path, f"{self.id}_response_llm.txt")
        with open(txt_path, mode='w', newline='') as file:
            file.write("start")

    def init_riders_network(self, other_riders):
        """Link each rider in `other_riders` with this rider (bidirectional)."""
        for other_rider in other_riders:
            if other_rider.id not in self.network:
                self.network[other_rider.id] = other_rider
                other_rider.network[self.id] = self
                print(f"棋手{self.id}和{other_rider.id}建立联系")

    def remove_riders_network(self, other_riders):
        """Remove the bidirectional network links to each rider in `other_riders`."""
        for other_rider in other_riders:
            if other_rider.id in self.network:
                del self.network[other_rider.id]
                del other_rider.network[self.id]
                print(f"棋手{self.id}和{other_rider.id}删除联系")

    async def step(self, intervention_strategy, percentage, time_model, grid, orders, riders, order_id, runner_step, eat_list):
        """Advance this rider by one model tick.

        Handles commuting to work, converting dispatched routes into walking
        steps, payouts, order count-downs, going to sleep, the fixed walking
        budget per tick, idle wandering and per-tick logging.
        """
        # Clear the positions to be pushed to the frontend this tick.
        self.update_position = []
        # Leave home for the workplace at wake-up time.
        if int(self.model.runner_step) == self.walk_time:
            self.random_position[1] = door_list_random[1: 13][self.id % 10][0]
            self.start_walk(self.random_position[1][0], self.random_position[1][1])  # depart from home
            self.status = 1  # now commuting to the workplace

        # Apply the dispatch results.
        if self.status == 2:
            # Merge newly dispatched legs behind the legs still pending.
            self.routes_before = self.routes_before + self.route
            if len(self.routes_before) > 0 and runner_step % 10 == 0:
                self.choose_order(orders, runner_step, orders)
            self.update_money(orders, intervention_strategy, percentage,
                              self.model.time_model, self.model.runner_step, riders)  # pay out and memorise earnings
            # Update visited-merchant statistics.
            for route_info in self.routes_before:
                self.pickup_merchant_update(route_info)
            self.route_to_walk(runner_step, grid)  # convert route legs into walking steps

        if self.status == 3:
            self.routes_before = self.routes_before + self.route
            for route_info in self.routes_before:
                self.pickup_merchant_update(route_info)
            self.route_to_walk(runner_step, grid)  # convert route legs into walking steps

        # Reset both route buffers. Assign separate lists: the original
        # `self.route = self.routes_before = []` aliased both names to ONE
        # list object, so a later append to either mutated both.
        self.route = []
        self.routes_before = []

        # Check whether any orders finish at this step.
        step_now = int(runner_step)
        if step_now in self.count_down:
            finished = self.count_down.count(step_now)
            self.order_count -= finished
            # Drop every matching entry; the original removed only one
            # occurrence, leaving stale duplicates behind.
            self.count_down = [t for t in self.count_down if t != step_now]

        # Head home once it is past bedtime and all steps are walked.
        if int(runner_step) > self.sleep_time and len(self.position) == 0:
            self.start_walk(self.home.x, self.home.y)  # go home

        # Switch to resting status and roll the schedule to the next day.
        if (int(runner_step) > self.sleep_time or
                self.time_position >= abs(self.sleep_time) * WALK_NUM):
            self.status = 0
            self.walk_time += ONE_DAY
            self.sleep_time += ONE_DAY

        # Each tick the rider walks a fixed budget of steps.
        walk_tim = WALK_NUM
        if len(self.position):
            self.random_position[0] = False  # any wandering plan must restart later
            walk_len = walk_tim if len(self.position) > walk_tim else len(self.position)
            for i in range(walk_len):
                self.move(grid)
            walk_tim -= walk_len
        elif (self.status == 1 or self.status == 3) and len(self.position) == 0:
            self.status = 2  # arrived: start working

        # Spend any leftover budget wandering.
        if walk_tim > 0:
            self.random_walk(walk_tim, grid, riders, runner_step)
        if (not (self.status == 1 or self.status == 3)) and runner_step >= 10 and runner_step % 5 == 0:
            if self.status != 0:
                self.money_check(riders, runner_step)  # possibly switch region
                self.move_regions = self.regions_work()  # refresh reachable regions

        self.llm_info_log(runner_step, time_model)

    def money_check(self, riders, runner_step):
        """Switch work region when earning less than the rider average."""
        if not riders:  # guard: avoid ZeroDivisionError on an empty rider list
            return
        riders_money = [rider.money for rider in riders]
        if self.money < sum(riders_money) / len(riders_money):
            self.choose_region(runner_step, riders)

    def regions_work(self):
        """Return the indices of regions whose centre is within reach of the
        rider's current position (threshold 100 grid units)."""
        region_work = []
        now_pt = self.now_position()
        for i in range(len(Region_info)):
            x, y = mean_point(Region_info[i])
            dis = math.hypot(now_pt.x - x, now_pt.y - y)
            if dis < 100:
                region_work.append(i)
        return region_work

    def action_log(self, runner_step, action, money, next_location, after_time):
        """Placeholder for action logging; intentionally a no-op."""
        pass

    def llm_info_log(self, runner_step, time_model):
        """Append one row of current state to this rider's CSV log."""
        # Resolve the path relative to this file, matching where __init__
        # created the log; a CWD-relative path would miss it when the model
        # is launched from another directory.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(script_dir, "llm_agent_log", f"deliver_{self.id}_record_llm.csv")

        with open(path, mode='a', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([self.id, self.location[0], self.location[1], runner_step, 0, self.work_status(runner_step),
                             int(self.money), self.home.x, self.home.y, self.walk_time, self.sleep_time,
                             int(runner_step) // ONE_DAY + 1, self.total_order, self.sex, self.diligent_level, "llm"])

    def choose_order(self, orders, runner_step, orders_all):
        """Let the rider reject orders; rejected orders go back to 'unprocessed'.

        `routes_before` holds the current legs: (action, order id, x, y, region).
        """
        remove_orders = self.remove_order(runner_step, orders_all)  # (order id, region) pairs rejected
        for order_id, region in remove_orders:
            for i in range(len(orders[region])):
                if orders[region][i].id == order_id:
                    orders[region][i].status = 'unprocessed'

    def remove_order(self, runner_step, orders_all):
        """Ask the LLM which orders to reject and strip their legs from the route.

        Returns the list of (order id, region) pairs that were rejected.
        """
        # Only LLM-driven riders reject orders; imitation (0) and evolutionary
        # (10) learners keep everything. The original test `!= (0 or 10)`
        # evaluated as `!= 10` because `0 or 10` is 10.
        if self.learning_type not in (0, 10):
            remove_info = self.remove_info_llm(runner_step, orders_all)
            removed_ids = {info_order[0] for info_order in remove_info}
            # Rebuild the list instead of calling remove() while iterating it,
            # which skipped elements after each removal.
            self.routes_before = [info for info in self.routes_before
                                  if info[1] not in removed_ids]
            return remove_info
        return []

    def remove_info_llm(self, runner_step, orders_all):
        """Query the LLM per pickup leg; collect (order id, region) pairs to drop.

        On any LLM failure the order is accepted by default.
        """
        remove_info = []
        for order_info in self.routes_before:
            print("order_info")
            print(order_info)
            order_id = order_info[1]
            orders = orders_all[order_info[4]]
            if order_info[0] == 'pickup':
                try:
                    ok = accept_order(self.llm_const_info, runner_step, orders[order_id].money, order_info,
                                      self.location,
                                      self.positive_reviews_num, self.negative_reviews_num)
                    if not ok:
                        remove_info.append((order_info[1], order_info[4]))
                except Exception:
                    print("llm接单判断出错，已默认接单")
                    continue
        return remove_info

    def update_money(self, orders_all, intervention_strategy, percentage, time_model, runner_step, riders):
        """Pay out money for each pending leg and update the order memory.

        Intervention strategies: 1 — split every order equally among all
        riders; 2 — the rider keeps its own order's money; otherwise — the
        rider keeps `percentage` of the order and the rest is split equally.
        """
        for route_info in self.routes_before:
            order_id = route_info[1]
            orders = orders_all[route_info[4]]
            print("gan yu ce lue: ", intervention_strategy)
            if intervention_strategy == 1:
                print("ping jun ce lue")
                average = orders[order_id].money / len(riders)
                for rider in riders:
                    rider.money += average
            elif intervention_strategy == 2:
                print("ji xiao")
                self.money += orders[order_id].money
            else:
                print("ji xiao + ping jun ce lue")
                performance = orders[order_id].money * percentage
                self.money += performance
                average = (orders[order_id].money - performance) / len(riders)
                for rider in riders:
                    rider.money += average
            for rider in riders:
                print(rider.id, ":", rider.money)
            get_money = orders[order_id].money
            # Hourly earnings bucket key (time_values / new_time_money).
            hour_money = str(time_model["hour"])
            # Memory used by evolutionary learning: accumulate earnings per
            # pickup location and track min/max for normalisation.
            # NOTE(review): the existing-key branch only updates the max; the
            # min is only updated when a key is first inserted — confirm intended.
            if f'{orders[order_id].pickup_location}' in self.order_memory:
                self.order_memory[f"{orders[order_id].pickup_location}"][0] += orders[order_id].money
                if self.order_memory_Normalization[1] < self.order_memory[f"{orders[order_id].pickup_location}"][0]:
                    self.order_memory_Normalization[1] = self.order_memory[f"{orders[order_id].pickup_location}"][0]
            else:
                self.order_memory[f"{orders[order_id].pickup_location}"] = [orders[order_id].money,
                                                                            orders[order_id].pickup_location]
                if self.order_memory_Normalization[0] > self.order_memory[f"{orders[order_id].pickup_location}"][0]:
                    self.order_memory_Normalization[0] = self.order_memory[f"{orders[order_id].pickup_location}"][0]

    def route_to_walk(self, runner_step, grid_now):
        """Convert the route legs in `routes_before` into concrete walking
        steps appended to `self.position`, updating reviews, visit statistics
        and completion count-downs along the way."""
        while len(self.routes_before):
            # Stop once the day's time budget is exhausted.
            if self.time_position >= abs(self.sleep_time) * WALK_NUM:
                self.status = 0
            now_pt = self.now_position()

            a_star_walk = AStarPlanner(now_pt.x, now_pt.y, self.routes_before[0][2], self.routes_before[0][3])
            walks = a_star_walk.planning()
            if walks is None:
                # Planner failed (target unreachable); skip this leg rather
                # than crashing on len(None).
                self.routes_before.pop(0)
                continue
            self.time_position += len(walks[0])  # time already planned
            # Record the order leg for later status queries.
            self.update_order_message(self.routes_before[0])

            # Feed back to the review system if the destination is a user.
            pt_test = dpt(self.routes_before[0][2], self.routes_before[0][3], 0)
            user = grid_now.get_agent(pt_test)
            if user and user.TYPE == User.TYPE:
                # NOTE(review): len(walks) is always 2 (the x/y lists);
                # len(walks[0]) — the path length — may have been intended.
                ans = user.evaluate_rider(len(walks))
                print(str(user.id) + "评价骑手" + str(self.id) + str(ans))
                self.total_order += 1
                if ans:
                    self.positive_reviews_num += 1
                else:
                    self.negative_reviews_num += 1

            # Accumulate walking distance per visited place. The key must use
            # the same tuple formatting as the assignment below; the original
            # membership test formatted a *list*, so it never matched and the
            # count was reset instead of accumulated.
            visit_key = f"{self.routes_before[0][2], self.routes_before[0][3]}"
            if visit_key in self.least_merchant:
                self.least_merchant[visit_key] += len(walks[0])
            else:
                self.least_merchant[visit_key] = len(walks[0])
            self.least_merchant["sum"] += len(walks[0])

            for i in range(len(walks[0])):
                self.position.append([walks[0][i], walks[1][i]])
            if self.routes_before[0][0] != 'pickup':
                # A delivery leg completes an order.
                self.order_num += 1
                find_merchant = f"{self.routes_before[0][2], self.routes_before[0][3]}"
                self.max_merchant[find_merchant] = 1 if find_merchant not in self.max_merchant \
                    else self.max_merchant[find_merchant] + 1
                self.max_merchant["sum"] += 1

                temp_time = int(runner_step) + int(len(self.position) / WALK_NUM)  # step at which the order completes
                self.count_down.append(temp_time)

            self.routes_before.pop(0)

    def work_status(self, runner_step):
        """Return 'b' when resting or wandering, 'a' when delivering."""
        if not (self.walk_time < runner_step < self.sleep_time):
            return "b"  # resting
        elif self.random_position[0]:
            return "b"  # wandering
        else:
            return "a"  # delivering

    def update_order_message(self, route_info):
        """Record (completion step, action, order id) for a planned leg."""
        self.order_info.append([int(self.time_position / WALK_NUM), route_info[0], route_info[1]])

    def order_message(self, runner_step):
        """Drop stale order entries and return the current one.

        Returns (order entry or 0, money, completed order count).
        """
        i = 0
        while i < len(self.order_info):
            if runner_step <= self.order_info[i][0]:
                break
            elif i + 1 < len(self.order_info) and runner_step > self.order_info[i][0] and runner_step > \
                    self.order_info[i + 1][0]:
                # Both this entry and the next are in the past: discard this one.
                self.order_info.pop(i)
                i -= 1
            else:
                break
            i += 1
        if len(self.order_info):
            return self.order_info[0], self.money, self.order_num
        else:
            return 0, self.money, self.order_num

    def get_neighbors(self, riders):
        """Return the riders connected to this one through the social network."""
        rider_neighbors = []
        for id_order in self.network:
            rider_neighbors.append(self.model.riders_list[id_order])
        return rider_neighbors

    def random_point_imit(self, riders):
        """Imitation learning: copy a neighbour's behaviour to choose where to
        wander, and extend the social network with the imitated rider."""
        neighbors = self.get_neighbors(riders)
        if len(neighbors):  # requires at least one neighbour
            learning = ImitationLearning(self, neighbors, 0.8)
            learning.set_feature_effect(self.feature_effect)
            index = learning.individual_imitation()
            self.random_position[1] = self.feature[0]
            self.init_riders_network([self.model.riders_list[index]])
            self.model.G.add_edge(self.id, index)
            print("学习成功")

    def now_fitness(self, time_model):
        """Evolutionary-learning fitness: last full hour's earnings, or -1.

        NOTE(review): reads ``self.info``, which is never set in __init__ —
        presumably assigned elsewhere by the model; confirm before relying on it.
        """
        if time_model["hour"] > 0:
            check_time = time_model["hour"] - 1
        else:
            check_time = time_model["hour"]

        print("fit e:", int(self.info["time_values"][str(check_time)]))
        return int(self.info["time_values"][str(check_time)]) \
            if int(self.info["time_values"][str(check_time)]) > 0 else -1

    def get_chromosome(self):
        """Evolutionary learning: return this individual's chromosome."""
        return [self.dis_import, self.dis_len]

    def update_chromosome(self, new_chromosome):
        """Evolutionary learning: install a new chromosome; the two importance
        weights always sum to 10."""
        self.dis_import = new_chromosome[0]
        self.dis_len = new_chromosome[1]
        self.money_import = 10 - int(self.dis_import)

    def random_point_evolut(self):
        """Pick a wandering target from the order memory using the evolved genes."""
        now_pt = self.now_position()
        final_ans = []
        for info in self.order_memory.items():
            plan = AStarPlanner(now_pt.x, now_pt.y, info[1][1][0], info[1][1][1]).planning()
            if plan is None:
                continue  # unreachable memory entry; skip instead of crashing
            # planning() returns (xs, ys); the path length is len(xs). The
            # original measured len of the 2-tuple itself, which is always 2.
            dis_len = len(plan[0])
            if dis_len < self.dis_len:
                final_ans.append([dis_len / self.dis_len, (info[1][0] - self.order_memory_Normalization[0]) /
                                  (self.order_memory_Normalization[1] - self.order_memory_Normalization[0] + 0.01),
                                  info[1][1]])
        print("final ans: ", final_ans)
        # NOTE(review): the distance ratio is weighted by money_import and the
        # money ratio by dis_import — the weights look swapped; confirm.
        final_ans = sorted(final_ans, key=lambda x: x[0] * self.money_import + x[1] * self.dis_import, reverse=True)
        if len(final_ans):
            self.random_position[1] = final_ans[0][-1]

    def random_point_llm(self):
        """Ask the LLM for a new wandering/work location."""
        self.random_position[1] = get_new_work_location(self.llm_const_info, self.pickup_merchant, self.location,
                                                        self.positive_reviews_num, self.negative_reviews_num)

    def pickup_merchant_update(self, route_info):
        """Increment the visit counter for the merchant at this leg's location,
        inserting a new record on first visit."""
        merchant_location = (route_info[2], route_info[3])
        ok = False
        for i in range(len(self.pickup_merchant)):
            if self.pickup_merchant[i].get("location") == merchant_location:
                times_tmp = self.pickup_merchant[i].get("times")
                self.pickup_merchant[i].update(times=times_tmp + 1)
                ok = True
                break
        if not ok:
            self.pickup_merchant.append({"times": 1,
                                         "location": merchant_location})

    def now_position(self):
        """Return where the rider will be after walking all pending steps
        (the planning frontier), or the current point if none are pending."""
        if len(self.position):
            return dpt(self.position[-1][0], self.position[-1][1], 0)
        else:
            return self.pt

    def planned_position(self):
        """Return the end point of the already-planned route, falling back to
        the current walking frontier."""
        if len(self.routes_before):
            print("route", self.routes_before)
            return dpt(self.routes_before[-1][2], self.routes_before[-1][3], 0)
        else:
            return self.now_position()

    def random_walk(self, walk_tim, grid, riders, runner_step):
        """Spend `walk_tim` leftover steps wandering toward a learned target.

        If no wandering plan is active, pick a target via the rider's learning
        type and plan a route; then consume steps from the plan. Unused steps
        are logged as idle markers ([-1, 0]).
        """
        if self.status in [0, 1]:  # resting or commuting: no wandering
            self.done_position.extend([-1, 0] for _ in range(walk_tim))
            self.update_position.extend([-1, 0] for _ in range(walk_tim))
            self.time_position += walk_tim
            return

        if not self.random_position[0]:  # wandering not yet initialised
            # Choose the target according to the learning type.
            if self.learning_type == 0:
                ax = 1  # imitation learners keep their current target
            elif self.learning_type == 10:
                self.random_point_evolut()
            else:
                self.random_point_llm()
            now_pt = self.now_position()
            random_node = self.random_position[1]  # target point
            a_star_walk = AStarPlanner(now_pt.x, now_pt.y, random_node[0], random_node[1])
            route = a_star_walk.planning()
            if route is None:
                # The LLM produced an unreachable location: cancel the transfer
                # and stay in place.
                print("LLM transfer failed!!!")
                a_star_walk = AStarPlanner(now_pt.x, now_pt.y, now_pt.x, now_pt.y)
                route = a_star_walk.planning()
            self.random_position[2] = []
            for i in range(len(route[0])):
                self.random_position[2].append([route[0][i], route[1][i]])
            self.random_position[0] = True
        while walk_tim:
            if len(self.random_position[2]) == 0:
                # Plan exhausted: log the remaining budget as idle.
                self.done_position.extend([-1, 0] for _ in range(walk_tim))
                self.update_position.extend([-1, 0] for _ in range(walk_tim))
                self.time_position += walk_tim
                break
            self.position.append(self.random_position[2][0])
            self.move(grid)
            self.random_position[2] = self.random_position[2][1:]
            self.time_position += 1
            walk_tim -= 1

    def save(self) -> Tuple:
        """Serialise the rider's state for repast4py synchronisation."""
        return (
            self.uid, self.pt.coordinates, self.route, self.order_count, self.status, self.done_position, self.position,
            self.count_down, self.order_num)

    def start_walk(self, x, y):
        """Plan a walk from the current position to (x, y) and queue its steps."""
        a_star_test = AStarPlanner(self.pt.x, self.pt.y, x, y)
        route = a_star_test.planning()
        if route is None:
            # Target unreachable; leave the queue unchanged rather than crash.
            return
        r_len = len(route[0])
        self.time_position += r_len
        for i in range(r_len):
            self.position.append([route[0][i], route[1][i]])

    def move(self, grid):
        """Advance one step: pop the next queued position, update the current
        point and the done/frontend step logs, and accrue labour cost."""
        self.labor += 0.01
        self.pt = dpt(self.position[0][0], self.position[0][1], 0)
        self.location = (self.pt.x, self.pt.y)
        self.done_position.append(self.position[0])
        self.update_position.append(self.position[0])
        self.position = self.position[1:]

    def update_region(self):
        """Refresh the work region from the current planning frontier; used by dispatch."""
        point = self.now_position()
        region_num = get_which_region((point.x, point.y))
        self.work_region = region_num

    def choose_region(self, runner_step, riders):
        """Choose the region to take orders from (every 5 steps).

        Imitation learners copy a neighbour; everyone else asks the LLM, with
        a random fallback on LLM failure. Returns the (possibly unchanged)
        work region.
        """
        ans = []
        if runner_step % 5 == 0:
            for i in range(5):
                if i < REGION_WORK:
                    ans.append(i)
            if len(ans):
                try:
                    if self.learning_type == 0:
                        self.random_point_imit(riders)
                        self.work_region = self.feature[1]
                    else:
                        self.work_region = int(
                            choose_region_llm(self.llm_const_info, ans, self.location, self.positive_reviews_num,
                                              self.negative_reviews_num))
                    print("选择区域成功")
                except Exception:
                    print("llm选择商区失败，已随机选择商区")
                    self.work_region = random.choice(ans)
        return self.work_region

    def info_get(self, runner_step):
        """Return [x, y, step, status, money] for external reporting."""
        status_now = self.work_status(runner_step)
        return [self.pt.x, self.pt.y, runner_step, status_now, self.money]