import torch
import torch.nn as nn
import torch.optim as optim
import random
import numpy as np
from collections import deque
from Sequence.sequence_model import SequenceGameRule as GameRule
from agents.t_069.dqntest import myAgent  # replace with the path to your DQN agent
from template import Agent
import os
import copy

# Hyperparameters
EPISODES = 5000        # number of training episodes
MAX_STEPS = 100        # hard cap on steps per episode
BATCH_SIZE = 64        # transitions sampled from replay per optimisation step
GAMMA = 0.99           # discount factor for the Bellman target
EPSILON_START = 1.0    # initial exploration rate (fully random)
EPSILON_END = 0.1      # floor for the exploration rate
EPSILON_DECAY = 0.995  # multiplicative epsilon decay applied each episode
LR = 1e-4              # Adam learning rate
TARGET_UPDATE = 10     # episodes between target-network weight syncs
MEMORY_SIZE = 10000    # replay buffer capacity

# Use the GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Simple DQN network (you can substitute a more complex architecture)
class DQN(nn.Module):
    """Small fully connected Q-network mapping a state vector to one Q-value per action."""

    def __init__(self, input_dim, output_dim):
        super(DQN, self).__init__()
        # Two hidden layers; module order matches (Linear, ReLU, Linear, ReLU, Linear)
        # so state_dict keys stay `net.0` / `net.2` / `net.4`.
        hidden_widths = (256, 128)
        layers = []
        prev_width = input_dim
        for width in hidden_widths:
            layers.append(nn.Linear(prev_width, width))
            layers.append(nn.ReLU())
            prev_width = width
        layers.append(nn.Linear(prev_width, output_dim))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return Q-values of shape (batch, output_dim) for a batch of state vectors."""
        return self.net(x)

# State-encoding function (customise to your game-state structure)
def encode_state(state, agent_id):
    """Encode the board as two flat binary occupancy planes (own chips, opponent chips).

    Returns a 1-D numpy int array of length 2 * rows * cols: own-chip mask
    followed by opponent-chip mask. Richer feature encodings can be swapped in.
    """
    board = np.asarray(state.board.chips)
    own_colour = state.agents[agent_id].colour
    opp_colour = state.agents[agent_id].opp_colour

    # Vectorised equality masks replace the nested list comprehensions.
    own_plane = (board == own_colour).astype(int).flatten()
    opp_plane = (board == opp_colour).astype(int).flatten()
    return np.concatenate([own_plane, opp_plane])

# Replay Buffer
class ReplayMemory:
    """Fixed-capacity FIFO buffer of transitions for experience replay."""

    def __init__(self, capacity):
        # deque evicts the oldest transition automatically once full
        self.memory = deque(maxlen=capacity)

    def push(self, transition):
        """Append one (state, action, reward, next_state, done) tuple."""
        self.memory.append(transition)

    def sample(self, batch_size):
        """Draw `batch_size` stored transitions uniformly, without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently held."""
        return len(self.memory)

# Main training function
def train():
    """Train a DQN on Sequence rollouts and save the policy network to dqn_model.pt.

    Trains a single learner (agent id 0) with epsilon-greedy action selection,
    uniform experience replay, and a target network synced every TARGET_UPDATE
    episodes. NOTE(review): the opponent agent never takes a turn in this loop —
    confirm a one-sided rollout is intended.
    """
    agent = myAgent(0)
    opponent = myAgent(1)  # unused below; kept in case myAgent() has construction side effects

    state_dim = 200   # 10x10 board, two binary planes (own / opponent chips)
    action_dim = 100  # upper bound on the action-index space exposed to the network

    policy_net = DQN(state_dim, action_dim).to(DEVICE)
    target_net = DQN(state_dim, action_dim).to(DEVICE)
    target_net.load_state_dict(policy_net.state_dict())
    target_net.eval()

    optimizer = optim.Adam(policy_net.parameters(), lr=LR)
    loss_fn = nn.MSELoss()  # hoisted: no need to rebuild the criterion every step
    memory = ReplayMemory(MEMORY_SIZE)

    epsilon = EPSILON_START

    for episode in range(EPISODES):
        # Fresh game each episode (the pre-loop GameRule construction was redundant).
        rule = GameRule(num_of_agent=2)
        state = rule.current_game_state
        total_reward = 0

        for _ in range(MAX_STEPS):
            actions = rule.getLegalActions(state, agent.id)
            if not actions:
                break
            # The network only has action_dim outputs; clamp the legal-action
            # list so a stored index can never overflow the Q-value vector
            # (the original crashed in `gather` when len(actions) > action_dim).
            actions = actions[:action_dim]
            action_map = dict(enumerate(actions))

            state_vec = torch.FloatTensor(encode_state(state, agent.id)).unsqueeze(0).to(DEVICE)

            # Epsilon-greedy selection restricted to the legal actions.
            if random.random() < epsilon:
                action_idx = random.randrange(len(actions))
            else:
                with torch.no_grad():
                    q_values = policy_net(state_vec)
                    # Indices are 0..len(actions)-1, so the identity index-map
                    # reduces to an argmax over the first len(actions) outputs.
                    action_idx = torch.argmax(q_values[0][:len(actions)]).item()

            action = action_map[action_idx]
            # Deep-copy so the successor computation cannot mutate `state`.
            next_state = rule.generateSuccessor(copy.deepcopy(state), action, agent.id)

            # Reward = score gained by this single move.
            reward = next_state.agents[agent.id].score - state.agents[agent.id].score
            # NOTE(review): proxy terminal condition — confirm against the
            # game's real game-over check.
            done = reward >= 100

            next_vec = torch.FloatTensor(encode_state(next_state, agent.id)).unsqueeze(0).to(DEVICE)
            memory.push((state_vec, action_idx, reward, next_vec, done))

            state = next_state
            total_reward += reward

            # One optimisation step once the buffer can fill a batch.
            if len(memory) >= BATCH_SIZE:
                transitions = memory.sample(BATCH_SIZE)
                batch_state, batch_action, batch_reward, batch_next, batch_done = zip(*transitions)

                batch_state = torch.cat(batch_state)
                batch_action = torch.LongTensor(batch_action).unsqueeze(1).to(DEVICE)
                batch_reward = torch.FloatTensor(batch_reward).unsqueeze(1).to(DEVICE)
                batch_next = torch.cat(batch_next)
                batch_done = torch.BoolTensor(batch_done).unsqueeze(1).to(DEVICE)

                q_values = policy_net(batch_state).gather(1, batch_action)
                with torch.no_grad():
                    max_next_q = target_net(batch_next).max(1)[0].unsqueeze(1)
                    # Bellman target; bootstrap only from non-terminal next states.
                    target = batch_reward + GAMMA * max_next_q * (~batch_done)

                loss = loss_fn(q_values, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            if done:
                break

        # Periodically sync the target network with the online network.
        if episode % TARGET_UPDATE == 0:
            target_net.load_state_dict(policy_net.state_dict())

        epsilon = max(EPSILON_END, epsilon * EPSILON_DECAY)
        print(f"Episode {episode}, Total reward: {total_reward:.2f}, Epsilon: {epsilon:.3f}")

    # Persist the trained weights.
    torch.save(policy_net.state_dict(), "dqn_model.pt")
    print("Training complete. Model saved to dqn_model.pt.")

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    train()
