from template import Agent
from Sequence.sequence_model import SequenceGameRule as GameRule
import torch
import torch.nn as nn
import numpy as np
import random
import os

NUM_PLAYERS = 2
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# DQN network architecture (must match the one used at training time).
class DQN(nn.Module):
    """Three-layer MLP mapping a flat state vector to per-action Q-values.

    The layers live inside a single ``nn.Sequential`` named ``net`` so that
    the ``state_dict`` keys match checkpoints saved by the training script.
    """

    def __init__(self, input_dim=200, output_dim=100):
        super(DQN, self).__init__()
        hidden_sizes = (256, 128)
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_sizes[0]),
            nn.ReLU(),
            nn.Linear(hidden_sizes[0], hidden_sizes[1]),
            nn.ReLU(),
            nn.Linear(hidden_sizes[1], output_dim),
        )

    def forward(self, x):
        """Return Q-value estimates for a batch of encoded states."""
        return self.net(x)

# State encoding helper.
def encode_state(state, agent_id):
    """Encode the board as two stacked binary occupancy planes.

    The first plane marks cells holding this agent's chips, the second
    marks the opponent's chips; both are flattened and concatenated into
    a single 1-D numpy int array (own plane first).
    """
    board = np.asarray(state.board.chips)
    me = state.agents[agent_id]
    own_plane = (board == me.colour).astype(int).ravel()
    opp_plane = (board == me.opp_colour).astype(int).ravel()
    return np.concatenate([own_plane, opp_plane])

# Agent implementation.
class myAgent(Agent):
    """Sequence agent that selects moves greedily from a pretrained DQN.

    If no checkpoint is found on disk at construction time the agent
    falls back to uniformly random legal actions.
    """

    def __init__(self, _id):
        super().__init__(_id)
        self.id = _id
        self.game_rule = GameRule(NUM_PLAYERS)

        # Build the network, then try to load trained parameters.
        # self.model_loaded records whether weights were actually restored;
        # SelectAction uses it to decide between DQN and random play.
        self.policy_net = DQN().to(DEVICE)
        self.model_loaded = False
        model_path = "dqn_model.pt"
        if os.path.exists(model_path):
            self.policy_net.load_state_dict(torch.load(model_path, map_location=DEVICE))
            self.policy_net.eval()
            self.model_loaded = True
            print(f"[Agent {self.id}] Loaded model from {model_path}")
        else:
            print(f"[Agent {self.id}] Warning: DQN model file not found, using random actions")

    def SelectAction(self, actions, game_state):
        """Return the legal action with the highest predicted Q-value.

        Falls back to a random choice when no checkpoint was loaded, and
        returns None when the legal-action list is empty.
        """
        if not actions:
            return None

        # BUG FIX: the original guard was `hasattr(self.policy_net, 'net')`,
        # which is always True because DQN.__init__ always defines `.net` —
        # so an untrained network silently picked moves when the checkpoint
        # was missing. Use the explicit load flag instead.
        if not self.model_loaded:
            return random.choice(actions)

        # Encode the current state from this agent's perspective.
        state_vec = torch.FloatTensor(encode_state(game_state, self.id)).unsqueeze(0).to(DEVICE)

        # Greedy action selection over the first len(actions) Q-outputs.
        # NOTE(review): this assumes output index i of the network corresponds
        # to position i in the legal-actions list, i.e. training used the same
        # enumeration order — TODO confirm against the training script.
        with torch.no_grad():
            q_values = self.policy_net(state_vec)[0]
            valid_q_values = q_values[: len(actions)]
            best_index = int(torch.argmax(valid_q_values).item())

        return actions[best_index]
