from __future__ import annotations

import math
import random
import numpy as np
from typing import Optional, Dict, Any
import time
import uuid

# Import the ModelManager and Experience classes created earlier
from src.beckend.model_manager import ModelManager, Experience


# Block ids: 0=ground, 1=wall, 2=trap (damages on contact), 3=chest
class Block:
    """A single map tile identified by a numeric id
    (0 = ground, 1 = wall, 2 = trap, 3 = chest)."""

    def __init__(self, id: int):
        self.id = id

    def interact(self, target: Entity) -> None:
        """Apply this tile's effect to the entity that just stepped onto it."""
        if self.id == 2:
            # Trap tile: damage whoever stands on it.
            target.add_health(-10)
            return
        if self.id == 3:
            # Chest tile: consume the chest, then grant loot to the agent only.
            self.id = 0
            if isinstance(target, Agent):
                target.score += 1
                target.health += 10
                target.attack += 5
                target.last_reward += 40  # chest-opening reward


class Entity:
    """Base class for anything placed on the map (the agent and the enemies)."""

    def __init__(self, health: float, attack: float, position: tuple[int, int]):
        self.health = health
        self.attack = attack
        self.position = position
        self.is_dead = False

    def update(self, env: GameEnvironment) -> None:
        """Advance this entity by one game round; overridden by subclasses."""
        pass

    def _attack_entity(self, target: Entity) -> None:
        """Deal this entity's attack damage to `target`, marking it dead at <= 0 HP."""
        target.health -= self.attack
        if isinstance(target, Agent):
            # Penalize the agent for taking damage.
            target.last_reward -= self.attack * 0.5
        if target.health <= 0:
            target.health = 0
            target.is_dead = True
            if isinstance(target, Agent):
                target.last_reward -= 50  # death penalty

    def add_health(self, health: float) -> None:
        """Adjust health by `health` (negative values are damage)."""
        self.health += health
        if isinstance(self, Agent) and health < 0:
            # BUGFIX: `health` is negative here, so the old `-=` *increased*
            # the reward for taking damage; `+=` applies the intended penalty,
            # consistent with the damage penalty in _attack_entity().
            self.last_reward += health * 0.3
        if self.health <= 0:
            self.health = 0
            self.is_dead = True
            if isinstance(self, Agent):
                self.last_reward -= 50  # death penalty

    def get_walk_valid_directions(self, env: GameEnvironment) -> list[int]:
        """Return the walkable directions from here (0=+x, 1=+y, 2=-x, 3=-y)."""
        x, y = self.position
        res = []
        if env.check_pos_valid(x + 1, y):
            res.append(0)
        if env.check_pos_valid(x, y + 1):
            res.append(1)
        if env.check_pos_valid(x - 1, y):
            res.append(2)
        if env.check_pos_valid(x, y - 1):
            res.append(3)
        return res

    def walk(self, env: GameEnvironment, dir: int) -> bool:
        """Move one tile in direction `dir`; return whether the move succeeded."""
        new_x, new_y = self.position
        if dir % 2 == 0:
            new_x = self.position[0] + 1 - dir  # dir 0 -> +1, dir 2 -> -1
        else:
            new_y = self.position[1] + 2 - dir  # dir 1 -> +1, dir 3 -> -1

        if not env.check_pos_valid(new_x, new_y):
            return False

        self.position = (new_x, new_y)
        # Trigger the destination tile's effect (trap damage, chest loot, ...).
        block = env.get_block(self.position[0], self.position[1])
        block.interact(self)
        return True


class Enemy(Entity):
    """Hostile entity: strikes the agent when orthogonally adjacent,
    otherwise takes one random legal step."""

    def __init__(self, health: float, attack: float, position: tuple[int, int]):
        super().__init__(health, attack, position)

    def update(self, env: GameEnvironment) -> None:
        """Attack the agent if next to it; wander randomly otherwise."""
        ax, ay = env.agent.position
        ex, ey = self.position
        same_col_adjacent = ex == ax and ey in (ay + 1, ay - 1)
        same_row_adjacent = ey == ay and ex in (ax + 1, ax - 1)
        if same_col_adjacent or same_row_adjacent:
            self._attack_entity(env.agent)
            return
        options = self.get_walk_valid_directions(env)
        if options:  # only move when at least one direction is open
            self.walk(env, options[random.randint(0, len(options) - 1)])


class Agent(Entity):
    """The learning-controlled player: explores the map, opens chests, fights
    enemies and accumulates shaped rewards for the model manager."""

    def __init__(self, health: float, attack: float, position: tuple[int, int],
                 model_manager: Optional[ModelManager] = None):
        super().__init__(health, attack, position)
        self.score = 0
        self.model_manager = model_manager
        self.last_action = None  # action chosen by the most recent update()
        self.last_state = None   # observation captured before that action
        self.last_reward = 0     # reward accumulated since the last feed_back()
        self.rewards = []        # per-round reward history
        self.episode_id = str(uuid.uuid4())
        self.visited_positions: dict[tuple, int] = {}  # tile -> visit count

    def get_jump_valid_directions(self, env: GameEnvironment) -> list[int]:
        """Directions (0=+x, 1=+y, 2=-x, 3=-y) where the tile being jumped over
        is passable terrain and the landing tile two steps away is fully free."""
        x, y = self.position
        res = []
        if env.check_pos_soft_valid(x + 1, y) and env.check_pos_valid(x + 2, y):
            res.append(0)
        if env.check_pos_soft_valid(x, y + 1) and env.check_pos_valid(x, y + 2):
            res.append(1)
        if env.check_pos_soft_valid(x - 1, y) and env.check_pos_valid(x - 2, y):
            res.append(2)
        if env.check_pos_soft_valid(x, y - 1) and env.check_pos_valid(x, y - 2):
            res.append(3)
        return res

    def update(self, env: GameEnvironment) -> None:
        """Choose and execute one action; model-driven when a model manager is
        attached, otherwise a random legal walk."""
        if self.model_manager is None:
            valid_dirs = self.get_walk_valid_directions(env)
            if valid_dirs:
                self.walk(env, random.choice(valid_dirs))
            return

        # Capture the pre-action observation so feed_back() can later build the
        # (old_state, action, reward, new_state) experience.
        old_state = EnvironmentAdapter.state_to_vector(env)
        self.last_state = old_state

        action = self.model_manager.make_decision(env, old_state)
        self.last_action = action

        # Action ids: 0-3 walk, 4-7 directional attack, 8-11 jump
        # (each group is ordered +x, +y, -x, -y).
        action_success = False
        if action < 4:
            action_success = self.walk(env, action)
        elif action < 8:
            action_success = self.attack_entity(env, action - 4)
        elif action < 12:
            action_success = self.jump(env, action - 8)

        if not action_success:
            self.last_reward -= 10  # penalty for illegal/failed actions

    def feed_back(self, env: GameEnvironment) -> None:
        """End-of-round bookkeeping: exploration reward shaping and, when
        training, committing the experience to the model manager."""
        if self.position not in self.visited_positions:
            # First visit: exploration bonus that grows near unopened chests.
            self.visited_positions[self.position] = 1
            d = self.get_nearest_chest_dis(env)
            # d is -1 when no chests remain (d*d == 1, same as before); the
            # max(..., 1.0) additionally guards against division by zero while
            # leaving every integer-grid distance >= 1 unchanged.
            self.last_reward += 2.5 + 6 / max(d * d, 1.0)
        else:
            # Revisit: escalating penalty discourages loitering.
            self.visited_positions[self.position] += 1
            self.last_reward -= 0.2 + self.visited_positions[self.position] * 0.4

        # BUGFIX: step() always calls feed_back(), even when the agent runs
        # without a model manager — guard instead of dereferencing None.
        if self.model_manager is not None and self.model_manager.training_mode:
            new_state = EnvironmentAdapter.state_to_vector(env)
            experience = Experience(
                oldState=self.last_state,
                newState=new_state,
                action=self.last_action,
                envReward=self.last_reward,
                episode_id=self.episode_id,
                round_number=env.cur_round
            )
            self.model_manager.commit_experience(experience)
        # Archive and reset the per-round reward accumulator.
        self.rewards.append(self.last_reward)
        self.last_reward = 0

    def attack_entity(self, env: GameEnvironment, dir: int) -> bool:
        """Attack the tile adjacent in direction `dir`; return True when an
        entity was there to hit."""
        x, y = self.position
        if dir % 2 == 0:
            x += 1 - dir  # dir 0 -> +x, dir 2 -> -x
        else:
            y += 2 - dir  # dir 1 -> +y, dir 3 -> -y

        entity = env.get_entity(x, y)
        if entity is None:
            return False
        self._attack_entity(entity)
        # Small shaping reward: more for a kill than for a plain hit.
        self.last_reward += 3 if entity.is_dead else 1
        return True

    def jump(self, env: GameEnvironment, dir: int) -> bool:
        """Jump two tiles in direction `dir`; return whether the jump succeeded."""
        if dir not in self.get_jump_valid_directions(env):
            return False

        new_x, new_y = self.position
        if dir % 2 == 0:
            new_x = self.position[0] + 2 - 2 * dir  # dir 0 -> +2, dir 2 -> -2
        else:
            new_y = self.position[1] + 4 - 2 * dir  # dir 1 -> +2, dir 3 -> -2
        self.position = (new_x, new_y)
        # Landing tile still applies its effect (trap damage, chest loot, ...).
        env.get_block(new_x, new_y).interact(self)
        return True

    def get_nearest_chest_dis(self, env: GameEnvironment) -> float:
        """Euclidean distance to the closest chest, or -1 if none remain."""
        min_d = math.inf
        for i, b in enumerate(env.map.blocks):
            if b.id == 3:
                # Flat index -> (x, y) in the row-major block list.
                x, y = i % env.map.max_x, i // env.map.max_x
                d = math.hypot(self.position[0] - x, self.position[1] - y)
                if d < min_d:
                    min_d = d
        return -1 if min_d == math.inf else min_d

    def get_nearest_enemy_dis(self, env: GameEnvironment) -> float:
        """Euclidean distance to the closest enemy, or -1 if none remain."""
        min_d = math.inf
        for e in env.entities:
            x, y = e.position
            d = math.hypot(self.position[0] - x, self.position[1] - y)
            if d < min_d:
                min_d = d
        return -1 if min_d == math.inf else min_d

    def get_valid_actions(self, env: GameEnvironment) -> list[int]:
        """Enumerate currently-legal action ids: walks (0-3), attacks on
        occupied adjacent tiles (4-7), and jumps (8-11)."""
        actions = self.get_walk_valid_directions(env)
        px, py = self.position
        for i in range(4):
            tx, ty = px, py
            if i % 2 == 0:
                tx += 1 - i
            else:
                ty += 2 - i
            if env.get_entity(tx, ty) is not None:
                actions.append(i + 4)
        actions.extend(i + 8 for i in self.get_jump_valid_directions(env))
        return actions



class Map:
    """Grid map stored as a flat, row-major list of Block tiles."""

    def __init__(self, maze: list[list[int]]):
        self.max_x: int = len(maze[0])  # width
        self.max_y: int = len(maze)     # height
        # Row-major flattening: index i maps to (x, y) = (i % max_x, i // max_x).
        self.blocks = [Block(maze[i // self.max_x][i % self.max_x]) for i in range(self.max_x * self.max_y)]

    @staticmethod
    def random_map(w: int = 17, h: int = 17, p_chest: float = 0.08, p_trap: float = 0.08) -> Map:
        """Generate a random maze (randomized Prim's algorithm), knock out 40%
        of the remaining walls, then sprinkle chests and traps on ground tiles."""
        maze = [[1] * w for _ in range(h)]
        walls = []

        def add_walls(x, y):
            # Queue the wall cells adjacent to the newly carved cell (x, y).
            for dx, dy in [(0, -1), (0, 1), (-1, 0), (1, 0)]:
                nx, ny = x + dx, y + dy
                if 0 <= nx < w and 0 <= ny < h and maze[ny][nx] == 1:
                    walls.append((x, y, nx, ny))

        start_x, start_y = random.randrange(0, w, 2), random.randrange(0, h, 2)
        maze[start_y][start_x] = 0
        add_walls(start_x, start_y)
        while walls:
            x, y, nx, ny = walls.pop(random.randint(0, len(walls) - 1))
            if maze[ny][nx] == 1:
                # Carve through the wall into the cell beyond it, if still solid.
                ex, ey = nx + (nx - x), ny + (ny - y)
                if 0 <= ex < w and 0 <= ey < h and maze[ey][ex] == 1:
                    maze[ny][nx] = maze[ey][ex] = 0
                    add_walls(ex, ey)

        def cells_with(value: int) -> list[tuple[int, int]]:
            # (row, col) coordinates of every cell currently holding `value`.
            return [(i, j) for i, row in enumerate(maze) for j, element in enumerate(row) if element == value]

        # Open up 40% of the remaining walls so the maze is less corridor-like.
        wall_cells = cells_with(1)
        for r, c in random.sample(wall_cells, round(len(wall_cells) * 0.4)):
            maze[r][c] = 0
        # Place chests on a fraction of the ground cells ...
        ground_cells = cells_with(0)
        for r, c in random.sample(ground_cells, round(len(ground_cells) * p_chest)):
            maze[r][c] = 3
        # ... then traps on a fraction of the ground cells that remain.
        ground_cells = cells_with(0)
        for r, c in random.sample(ground_cells, round(len(ground_cells) * p_trap)):
            maze[r][c] = 2
        return Map(maze)

    def get_chest_count(self) -> int:
        """Number of unopened chest tiles left on the map."""
        return sum(1 for b in self.blocks if b.id == 3)

    def get_ground_positions(self) -> list[tuple[int, int]]:
        """All (x, y) coordinates of ground tiles (id 0).

        BUGFIX: the row index must be `i // max_x` (row-major layout, see
        __init__); the old `i // max_y` only happened to work for square maps.
        """
        return [(i % self.max_x, i // self.max_x) for i, b in enumerate(self.blocks) if b.id == 0]


class GameEnvironment:
    """Owns the map, the agent and the enemies, and drives the game loop."""

    def __init__(self, model_manager: Optional[ModelManager] = None):
        self.map: Map | None = None
        self.agent: Agent | None = None
        self.entities: list[Entity] = []
        self.cur_round = 0
        self.model_manager = model_manager
        self.max_round = 100  # hard round cap used by check_over()

    def check_pos_valid(self, x: int, y: int) -> bool:
        """True if (x, y) is on the map, not a wall, and not occupied by any entity."""
        if x < 0 or x >= self.map.max_x or y < 0 or y >= self.map.max_y:
            return False
        if self.map.blocks[y * self.map.max_x + x].id == 1:
            return False
        if self.agent.position == (x, y):
            return False
        return all(e.position != (x, y) for e in self.entities)

    def check_pos_soft_valid(self, x: int, y: int) -> bool:
        """True if (x, y) is on the map and not a wall; entities are ignored
        (used for tiles that are jumped over rather than landed on)."""
        if x < 0 or x >= self.map.max_x or y < 0 or y >= self.map.max_y:
            return False
        return self.map.blocks[y * self.map.max_x + x].id != 1

    def get_block(self, x: int, y: int) -> Block:
        """Return the block at (x, y); out-of-bounds coordinates read as a wall."""
        if x < 0 or x >= self.map.max_x or y < 0 or y >= self.map.max_y:
            return Block(1)
        return self.map.blocks[y * self.map.max_x + x]

    def get_entity(self, x: int, y: int) -> Entity | None:
        """Return the entity standing on (x, y), or None if the tile is free."""
        if self.agent.position == (x, y):
            return self.agent
        for e in self.entities:
            if e.position == (x, y):
                return e
        return None

    def step(self) -> None:
        """Advance one round: the agent acts, surviving enemies act, then the
        agent's reward is settled."""
        self.agent.update(self)
        survivors = []
        for e in self.entities:
            # BUGFIX: an enemy the agent just killed must not act; previously
            # dead enemies still got one final update (and could attack) before
            # being removed.
            if not e.is_dead:
                e.update(self)
            if not e.is_dead:
                survivors.append(e)
        self.entities = survivors
        self.agent.feed_back(self)
        self.cur_round += 1

    def check_over(self) -> bool:
        """Game ends on agent death, all chests opened, or the round cap."""
        return self.agent.is_dead or self.map.get_chest_count() == 0 or self.cur_round >= self.max_round

    def run(self, max_rounds: int = 100, render: bool = False) -> Dict[str, Any]:
        """Run the game until it ends (or `max_rounds` steps) and return summary stats."""
        rounds = 0
        while not self.check_over() and rounds < max_rounds:
            self.step()
            rounds += 1

            if render:
                print(f'round {self.cur_round}\nagent: H {self.agent.health} S {self.agent.score}')
                print(self.print_map())
                time.sleep(0.1)
        # BUGFIX: guard so model-free runs don't dereference None.
        if self.model_manager is not None:
            self.model_manager.episode_clear(self.agent.episode_id)
        return {
            'rounds': self.cur_round,
            'score': self.agent.score,
            'health': self.agent.health,
            'survived': not self.agent.is_dead,
            'chests_remaining': self.map.get_chest_count()
        }

    def init_map(self, w: int, h: int, num_enemies: int, agent_health: float,
                 agent_attack: float, enemy_health: float, enemy_attack: float, p_chest: float, p_trap: float) -> None:
        """Create a fresh random map and (re)spawn the agent and enemies on
        distinct ground tiles."""
        self.map = Map.random_map(w, h, p_chest, p_trap)
        grounds = self.map.get_ground_positions()
        index = random.randint(0, len(grounds) - 1)

        # The agent shares the environment's model manager.
        self.agent = Agent(agent_health, agent_attack, grounds.pop(index), self.model_manager)

        self.entities = []
        for _ in range(num_enemies):
            if not grounds:
                break  # no free tiles left for more enemies
            index = random.randint(0, len(grounds) - 1)
            self.entities.append(Enemy(enemy_health, enemy_attack, grounds.pop(index)))
        self.cur_round = 0

    def print_map(self) -> str:
        """Render the map as text: tile ids, 'A' for the agent, 'E' for enemies."""
        content = [str(block.id) for block in self.map.blocks]
        content[self.agent.position[1] * self.map.max_x + self.agent.position[0]] = 'A'
        for e in self.entities:
            content[e.position[1] * self.map.max_x + e.position[0]] = 'E'
        sb = []
        for i, item in enumerate(content):
            sb.append(item + ' ')
            if i % self.map.max_x == self.map.max_x - 1:
                sb.append('\n')
        return ''.join(sb)

    def train(self, num_episodes: int = 100) -> None:
        """Run `num_episodes` training games on fresh random maps."""
        if self.model_manager is None:
            print("没有设置模型管理器，无法训练")
            return

        for episode in range(num_episodes):
            # Fresh map each episode; chest density anneals from 0.2 down
            # toward 0.06 as training progresses.
            self.init_map(17, 17, 8, 50, 10, 20, 5, 0.2 - 0.14 * (episode / num_episodes), 0.08)
            self.run(max_rounds=100, render=False)

    def test(self, num_episodes: int = 3) -> None:
        """Run evaluation episodes with training disabled and print the results."""
        # BUGFIX: mirror train()'s guard so evaluating without a model manager
        # fails gracefully instead of raising AttributeError.
        if self.model_manager is None:
            print("没有设置模型管理器，无法测试")
            return
        self.model_manager.set_training_state(False)
        for episode in range(num_episodes):
            self.init_map(17, 17, 8, 40, 10, 20, 5, 0.08, 0.08)

            result = self.run(max_rounds=100, render=False)

            print(f"\033[91mEpisode {self.agent.episode_id}: "
                  f"Rounds={result['rounds']}, "
                  f"Score={result['score']}, "
                  f"Health={result['health']}, "
                  f"Survived={result['survived']}\033[0m")
        self.model_manager.set_training_state(True)


# Game environment adapter
class EnvironmentAdapter:
    """Adapter between the game environment and the model manager: encodes the
    game state for the network and names the discrete actions."""

    @staticmethod
    def state_to_vector(env: GameEnvironment) -> tuple[np.ndarray, np.ndarray]:
        """Encode the environment as a (17, 17, 6) float32 grid plus a small
        scalar feature vector.

        Grid channels: 0 walls, 1 agent, 2 enemies, 3 chests, 4 traps,
        5 scaled visit counts. Maps larger than 17x17 are cropped to the grid
        (BUGFIX: the wall channel previously looped over the full map size and
        raised IndexError on oversized maps); smaller maps are zero-padded.
        """
        grid = 17
        state_vector = np.zeros((grid, grid, 6), dtype=np.float32)
        span_x = min(env.map.max_x, grid)
        span_y = min(env.map.max_y, grid)

        # Channels 0/3/4 in a single bounded pass over the (cropped) tile grid.
        for y in range(span_y):
            for x in range(span_x):
                block_id = env.get_block(x, y).id
                if block_id == 1:
                    state_vector[y, x, 0] = 1.0
                elif block_id == 3:
                    state_vector[y, x, 3] = 1.0
                elif block_id == 2:
                    state_vector[y, x, 4] = 1.0

        # Channel 1: agent position.
        ax, ay = env.agent.position
        if 0 <= ax < grid and 0 <= ay < grid:
            state_vector[ay, ax, 1] = 1.0

        # Channel 2: enemy positions.
        for entity in env.entities:
            if isinstance(entity, Enemy):
                ex, ey = entity.position
                if 0 <= ex < grid and 0 <= ey < grid:
                    state_vector[ey, ex, 2] = 1.0

        # Channel 5: visit counts (guarded so off-grid tiles cannot crash).
        for (vx, vy), count in env.agent.visited_positions.items():
            if 0 <= vx < grid and 0 <= vy < grid:
                state_vector[vy, vx, 5] = count * 0.2

        scalars = np.array([
            env.agent.health * 0.1,
            env.agent.attack * 0.1,
            env.agent.score,
            env.agent.get_nearest_chest_dis(env) * 0.1,
            env.agent.get_nearest_enemy_dis(env) * 0.1,
        ])
        return state_vector, scalars

    @staticmethod
    def action_to_command(action: int) -> str:
        """Map a discrete action id to its human-readable command name;
        unknown ids fall back to 'RIGHT'."""
        action_map = {
            0: 'RIGHT',
            1: 'UP',
            2: 'LEFT',
            3: 'DOWN',
            4: 'ATTACK_RIGHT',
            5: 'ATTACK_UP',
            6: 'ATTACK_LEFT',
            7: 'ATTACK_DOWN',
            8: 'JUMP_RIGHT',
            9: 'JUMP_UP',
            10: 'JUMP_LEFT',
            11: 'JUMP_DOWN',
        }
        return action_map.get(action, 'RIGHT')


# # test run
# if __name__ == "__main__":
#     model_manager = ModelManager(
#         state_shape=(17, 17, 5),
#         action_size=12,
#         use_double_dqn=True,
#         use_dueling=True,
#         use_per=True,
#         memory_size=20000,
#         batch_size=64,
#         target_update_freq=1000,
#         learning_rate=0.001,
#         gamma=0.99
#     )
#
#     # 创建游戏环境并传入模型管理器
#     env = GameEnvironment(model_manager)
#
#     # 先进行简单的测试运行
#     print("进行测试运行...")
#     env.init_map(17, 17, 5, 40, 10, 20, 5)  # 更少的敌人
#     result = env.run(max_rounds=50, render=True)
#     print(f"测试结果: {result}")
#
#     # 训练模式
#     print("\n开始训练模式...")
#     env.train(num_episodes=10)  # 先训练少量episode
#
#     # 评估模式
#     print("\n开始评估模式...")
#     model_manager.set_training_state(False)
#     env.init_map(17, 17, 8, 40, 10, 20, 5)
#     result = env.run(max_rounds=200, render=True)
#     print(f"评估结果: {result}")
