import random
from repast4py.core import Agent
import repast4py.space as space
from repast4py.core import Agent
from repast4py.space import DiscretePoint as dpt
from repast4py.space import ContinuousSpace
from repast4py.space import ContinuousPoint as cpt
from repast4py import context as ctx
from repast4py import schedule, logging
import os, sys
from L2.learning_module import ImitationLearning, EvolutionaryLearning
import numpy as np
import pandas as pd
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import SparkLLM
try:
    from .env import AGENT_MAX_SPEED
except ImportError:
    from env import AGENT_MAX_SPEED

# Resolve the map file relative to this module so loading works regardless
# of the process's current working directory.
current_directory = os.path.dirname(os.path.abspath(__file__))
file_map = os.path.join(current_directory, 'L1/map.csv')
df_map = pd.read_csv(file_map, header=None)
map_matrix = df_map.values
# Transpose the map matrix so cells are addressed as map_matrix[x][y];
# the movement helpers below treat cells equal to 1 as walkable road.
map_matrix = map_matrix.transpose()

class Environment:
    """Abstract interface for a state/action environment.

    Subclasses must implement `step` (state transition plus reward) and
    `get_reward` (reward signal for a single transition).
    """

    def __init__(self):
        # No shared state at the base level; subclasses own their own.
        pass

    def step(self, state, action):
        """Advance from `state` under `action`; must be overridden."""
        raise NotImplementedError("This method should be implemented by subclass")

    def get_reward(self, current_state, action, next_state):
        """Score the transition `current_state` -> `next_state`; must be overridden."""
        raise NotImplementedError("This method should be implemented by subclass")


class MeiTuanEnvironment(Environment):
    def __init__(self, n_locations=600, n_time_segments=3, n_order_density=1, n_rider_states=2,
                 actions=['1', '2', '3', '4', '5'], reduce_ration=20):
        self.n_locations = n_locations
        self.n_time_segments = n_time_segments
        self.n_order_density = n_order_density
        self.n_rider_states = n_rider_states
        self.actions = actions
        self.reduce_ration = reduce_ration

        self.n_states = 2
        self.n_actions = len(actions)

        # Used in the reward function
        self.previous_density = 0

    def step(self, state, action):
        # Implement the logic to transition from current state to next state
        # This is usually environment specific, for simplicity let's just return a random next state for now
        next_state = random.randint(0, self.n_states - 1)
        reward = self.get_reward(state, action, next_state)
        return next_state, reward

    def get_reward(self, current_state, action, next_state):
        # Implement the logic for reward calculation
        # For simplicity let's just return a random reward for now
        return random.randint(-1, 1)


def move_with_direction(pt: dpt, direction):
    """Return the grid point one step away from `pt` in `direction`.

    Direction encoding (see the legend in `random_walk_regular`):
    1 = up (+y), -1 = down (-y), 2 = left (-x), -2 = right (+x).
    Any other value returns `pt` unchanged.
    """
    next_position = pt
    if direction == 1:
        next_position = dpt(pt.x, pt.y + 1, 0)
    elif direction == -1:
        next_position = dpt(pt.x, pt.y - 1, 0)
    elif direction == 2:
        next_position = dpt(pt.x - 1, pt.y, 0)
    elif direction == -2:
        # Bug fix: -2 means "right" (+x); the original duplicated the
        # direction == 2 branch and moved left (-x) for both values.
        next_position = dpt(pt.x + 1, pt.y, 0)
    return next_position


def intersperse_fill_positions(move_positions, fill_count, fill_value):
    """Evenly intersperse `fill_count` copies of `fill_value` into a list.

    The result has len(move_positions) + fill_count entries; fill elements
    are spread at (roughly) equal intervals, with any remainder appended at
    the end when the interval does not divide evenly.
    """
    target_len = len(move_positions) + fill_count
    # Spacing between consecutive fill elements (degenerates to the whole
    # length when there is nothing to fill).
    spacing = target_len / fill_count if fill_count else target_len

    padded = []
    next_fill_at = spacing  # index at which the next fill element is due
    for idx, step in enumerate(move_positions):
        # Emit any fill elements that are due before this original entry.
        while idx >= next_fill_at:
            padded.append(fill_value)
            next_fill_at += spacing
        padded.append(step)

    # Top up to the target length (covers short inputs where the interval
    # arithmetic never triggered inside the loop).
    padded.extend([fill_value] * (target_len - len(padded)))

    return padded

class Individual(Agent):
    """Pedestrian agent that walks a road network and picks evasive actions.

    Combines three decision mechanisms:
      * imitation learning over neighbouring agents (`imit_step`),
      * tabular Q-learning (`choose_action` / `rl`),
      * an LLM prompt that role-plays the scenario (`get_response`).

    Movement happens on the module-level `map_matrix`, where cells equal
    to 1 are walkable road.
    """

    def __init__(self,
                 id: int,
                 feature: list,
                 rank,
                 t,
                 home,
                 il_prob=0.5,
                 alpha=0.1,
                 gamma=0.6,
                 epsilon=0.1,
                 environment=MeiTuanEnvironment,
                 v=20,
                 agent_type=0,
                 sys_template="You are playing a passerby. When you are walking on the road, a knife-wielding gangster appears. " \
                          "There are also 3 policemen and 8 other passersby on the street. You should play your role well and " \
                          "behave in accordance with your role. ",
                 user_template="To ensure your own safety, what will you do now? You have the following three options: 1. Run " \
                        "randomly to avoid the criminals 2. Run to the nearest police for help 3. Run to a safe location.You " \
                        "only need to answer 1, 2, 3 to represent your choice. "):
        """Create an agent.

        Args:
            id: unique agent id (passed to the repast4py Agent base).
            feature: feature vector used by the imitation-learning module.
            rank: MPI rank owning this agent.
            t: repast4py agent type tag.
            home: (x, y) start cell on the map grid.
            il_prob: probability used by the imitation-learning step.
            alpha: Q-learning learning rate.
            gamma: Q-learning discount factor.
            epsilon: exploration rate for epsilon-greedy action choice.
            environment: an Environment instance, or an Environment class
                to instantiate (the original default passed the class).
            v: steps moved per simulation tick (speed).
            agent_type: kept for API compatibility (currently unused here).
            sys_template: system prompt for the LLM role-play.
            user_template: user prompt for the LLM role-play.
        """
        super().__init__(id=id, rank=rank, type=t)

        # Imitation-learning state.
        self.feature = feature
        self.prob_il = il_prob
        self.feature_effect = [1 for _ in range(len(self.feature))]

        self.ans = 0  # Initialize ans value
        self.reward = 0
        self.pt = dpt(home[0], home[1], 0)
        self.radius = 3
        self.direction = 1  # 1=up, -1=down, 2=left, -2=right
        self.speed = v
        self.neighbors = []
        self.move_positions = []

        # Reinforcement-learning parameters.
        # Bug fix: the default passed the *class* MeiTuanEnvironment, so
        # attribute access like `self.environment.n_actions` in
        # choose_action failed. Accept either an instance or a class.
        self.environment = environment() if isinstance(environment, type) else environment
        self.alpha = alpha  # Learning rate
        self.gamma = gamma  # Discount factor
        self.epsilon = epsilon  # Exploration rate
        # Bug fix: q_table was left as None, which made choose_action/rl
        # raise on first use. Initialise a zeroed |S| x |A| table instead.
        self.q_table = np.zeros((self.environment.n_states, self.environment.n_actions))

        # LLM credentials and prompt setup.
        # SECURITY NOTE(review): hard-coded API credentials below should be
        # loaded from the environment or a secrets store and rotated; kept
        # in place here only to preserve existing runtime behavior.
        os.environ["IFLYTEK_SPARK_APP_ID"] = "55713da8"
        os.environ["IFLYTEK_SPARK_API_SECRET"] = "NzI1OThjNDI0ODM5M2NiODBhY2NlYjFj"
        os.environ["IFLYTEK_SPARK_API_KEY"] = "966252a76081be0e92e8fb0d00e7c858"

        os.environ["LANGCHAIN_TRACING_V2"] = "true"
        os.environ["LANGCHAIN_API_KEY"] = "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"

        os.environ["QIANFAN_AK"] = "H3cep7ar3Php6fNcNonpAubc"
        os.environ["QIANFAN_SK"] = "k5CCykaXcU80DfSdFf1iE3jAPm3iIWTY"

        # 1. Build the chat prompt from the system/user templates.
        self.prompt_template = ChatPromptTemplate.from_messages([
            ('system', sys_template),
            ('user', user_template)
        ])
        # 2. Create model
        self.model = SparkLLM()
        # 3. Create parser
        self.parser = StrOutputParser()

    def get_response(self):
        """Ask the LLM which of the three options the passerby takes.

        Returns 1, 2, or 3 based on which digit appears first in the
        precedence order below; returns None implicitly when the reply
        contains none of them (callers should be prepared for that).
        """
        # Create chain
        chain = self.prompt_template | self.model
        result = chain.invoke({})

        if "1" in result:
            return 1
        if "2" in result:
            return 2
        if "3" in result:
            return 3

    def get_reward(self):
        # Placeholder: reward computation not implemented for this agent yet.
        pass

    # Walk vertically (up/down).
    def go_up_down(self, steps):
        """Move up/down along the current column for up to `steps` cells.

        On hitting a non-road cell, switch to a horizontal direction for the
        next tick (flipping it if that side is also blocked) without moving.
        """
        if abs(self.direction) != 1:
            self.direction = random.choice([1, -1])
        # Take up to `steps` single-cell moves.
        for i in range(steps):
            next_position = move_with_direction(self.pt, self.direction)
            if map_matrix[next_position.x][next_position.y] == 1:
                self.pt = dpt(next_position.x, next_position.y, 0)
                self.move_positions.append([next_position.x, next_position.y])
            else:
                self.direction = random.choice([2, -2])
                try_position = move_with_direction(self.pt, self.direction)
                if map_matrix[try_position.x][try_position.y] != 1:
                    self.direction = self.direction * (-1)

    # Walk horizontally (left/right).
    def go_left_right(self, steps):
        """Move left/right along the current row for up to `steps` cells.

        Mirror image of `go_up_down`: on hitting a non-road cell, switch to
        a vertical direction for the next tick.
        """
        if abs(self.direction) == 2:
            pass
        else:
            self.direction = random.choice([2, -2])
        # Take up to `steps` single-cell moves.
        for i in range(steps):
            next_position = move_with_direction(self.pt, self.direction)
            if map_matrix[next_position.x][next_position.y] == 1:
                self.pt = dpt(next_position.x, next_position.y, 0)
                self.move_positions.append([next_position.x, next_position.y])
            else:
                self.direction = random.choice([1, -1])
                try_position = move_with_direction(self.pt, self.direction)
                if map_matrix[try_position.x][try_position.y] != 1:
                    self.direction = self.direction * (-1)

    # Random walk constrained to the road network.
    def random_walk_regular(self):
        """Walk `self.speed` cells along the road, turning at junctions.

        Direction encoding: 1 = up, -1 = down, 2 = left, -2 = right.
        Afterwards, pad `move_positions` to AGENT_MAX_SPEED entries with
        [-1, 0] markers so every agent reports a fixed-length trace.
        """
        direction_list = [1, -1, 2, -2]
        # Classify the current cell by which neighbours are road.
        current_x = self.pt.x
        current_y = self.pt.y
        current_up = map_matrix[current_x][current_y + 1]
        current_down = map_matrix[current_x][current_y - 1]
        current_left = map_matrix[current_x - 1][current_y]
        current_right = map_matrix[current_x + 1][current_y]

        if current_up + current_down == 2 and current_left + current_right == 2:  # 4-way junction
            # Do not turn straight back the way we came.
            direction_list.remove(self.direction * (-1))
            while 1:
                self.direction = random.choice(direction_list)
                try_position = move_with_direction(self.pt, self.direction)
                if map_matrix[try_position.x][try_position.y] == 1:
                    break

            if abs(self.direction) == 1:
                self.go_up_down(self.speed)
            else:
                self.go_left_right(self.speed)

        elif current_up + current_down == 2:  # vertical road
            self.go_up_down(self.speed)

        elif current_left + current_right == 2:  # horizontal road
            self.go_left_right(self.speed)

        else:  # at a corner
            # Pick any direction that leads onto road, then walk it.
            while 1:
                self.direction = random.choice(direction_list)
                try_position = move_with_direction(self.pt, self.direction)
                if map_matrix[try_position.x][try_position.y] == 1:
                    break

            if abs(self.direction) == 1:
                self.go_up_down(self.speed)
            else:
                self.go_left_right(self.speed)

        # Pad move_positions up to a fixed length.
        # Bug fix: the original hard-coded 20 here while the guard below
        # uses AGENT_MAX_SPEED; use the constant consistently.
        remaining_fill = AGENT_MAX_SPEED - len(self.move_positions)
        if len(self.move_positions) < AGENT_MAX_SPEED:
            self.move_positions.extend([[-1, 0]] * remaining_fill)


    def imit_step(self):
        """Run one imitation-learning step against current neighbours.

        Returns the index chosen by the ImitationLearning module.
        """
        learning = ImitationLearning(self, self.neighbors, self.prob_il)
        learning.set_feature_effect(self.feature_effect)
        index = learning.individual_imitation()
        return index

    def choose_action(self, state):
        """Epsilon-greedy action selection over the Q-table row for `state`."""
        if random.uniform(0, 1) < self.epsilon:
            return random.choice(range(self.environment.n_actions))
        else:
            return np.argmax(self.q_table[state, :])

    def rl(self, status, action, reward, next_status):
        """Standard tabular Q-learning update for (status, action)."""
        old_value = self.q_table[status, action]
        next_max = np.max(self.q_table[next_status, :])
        self.q_table[status, action] = old_value + self.alpha * (reward + self.gamma * next_max - old_value)


if __name__ == "__main__":
    # Build a population of 10 agents sharing one home cell.
    agents = []
    for agent_id in range(10):
        agents.append(Individual(agent_id, [1, 2, agent_id], 1, 1, (62, 112)))

    # Drive the random-walk simulation for 10 ticks.
    for tick in range(10):
        print(f"Step {tick + 1}")
        for walker in agents:
            walker.random_walk_regular()
        print("")  # blank line separating the steps