import random, math
import torch
import torch.nn.functional as F
from collections import deque, namedtuple
import numpy as np
from model import QNet
from config import *

# One environment step: state, action, reward, next state, terminal flag.
# Typename matches the variable name so repr() and pickling are consistent.
Transition = namedtuple('Transition', 's a r s_ done')

class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions with uniform sampling."""

    def __init__(self, capacity):
        # deque silently evicts the oldest transition once full.
        self.buf = deque(maxlen=capacity)

    def push(self, *args):
        """Append one transition (s, a, r, s_, done)."""
        self.buf.append(Transition(*args))

    def sample(self, batch):
        """Draw `batch` transitions uniformly without replacement.

        Returns five numpy arrays: states, actions, rewards, next states
        and done flags; actions/rewards/dones have shape (batch,).
        """
        picks = random.sample(self.buf, batch)
        states, actions, rewards, next_states, dones = zip(*picks)
        return (np.array(states),
                np.array(actions),
                np.array(rewards),
                np.array(next_states),
                np.array(dones))

    def __len__(self):
        return len(self.buf)

class DQNAgent:
    """Deep Q-Network agent that mixes expert demonstrations into each
    training batch alongside self-collected experience."""

    def __init__(self, n_state, n_action):
        self.n_state = n_state
        self.n_action = n_action

        # Online network plus a periodically-synced target network.
        self.q_net = QNet(n_state, n_action).to(DEVICE)
        self.tgt_net = QNet(n_state, n_action).to(DEVICE)
        self.tgt_net.load_state_dict(self.q_net.state_dict())

        self.optim = torch.optim.Adam(self.q_net.parameters(), lr=LR)
        self.memory = ReplayBuffer(MEMORY_SIZE)  # self-collected transitions
        self.expert = ReplayBuffer(MEMORY_SIZE)  # expert demonstrations

        self.steps = 0  # gradient updates done; drives eps decay and target sync

    def act(self, state, eps=None):
        """Return an epsilon-greedy action for `state`.

        When `eps` is None, epsilon anneals exponentially from EPS_START
        toward EPS_END as self.steps grows.
        """
        if eps is None:
            eps = EPS_END + (EPS_START - EPS_END) * \
                  math.exp(-1. * self.steps / EPS_DECAY)
        if random.random() < eps:
            return random.randint(0, self.n_action - 1)
        x = torch.tensor(state, device=DEVICE, dtype=torch.float32).unsqueeze(0)
        with torch.no_grad():
            return self.q_net(x).argmax(1).item()

    def store(self, *transition, expert=False):
        """Push (s, a, r, s_, done) into the expert or the normal buffer."""
        if expert:
            self.expert.push(*transition)
        else:
            self.memory.push(*transition)

    def _batch_sizes(self):
        """Split BATCH into (n_norm, n_exp) sample quotas, clamped so that
        neither buffer is asked for more transitions than it holds.

        Assumes len(memory) + len(expert) >= BATCH (checked by update()),
        so the two quotas always sum to exactly BATCH.
        """
        n_exp = min(int(BATCH * EXPERT_RATIO), len(self.expert))
        n_norm = BATCH - n_exp
        if n_norm > len(self.memory):
            # Not enough normal experience yet: backfill from the expert buffer.
            n_norm = len(self.memory)
            n_exp = min(BATCH - n_norm, len(self.expert))
        return n_norm, n_exp

    def update(self):
        """Run one gradient step on a mixed normal/expert batch.

        No-op until the combined buffers hold at least BATCH transitions.
        Syncs the target network every UPDATE_TARGET steps.
        """
        if len(self.memory) + len(self.expert) < BATCH:
            return

        # BUGFIX: the original set n_norm = BATCH - n_exp without checking
        # len(self.memory), so random.sample() raised ValueError whenever
        # the normal buffer held fewer transitions than its quota.
        n_norm, n_exp = self._batch_sizes()

        # Sample each buffer, skipping zero quotas entirely so we never
        # fabricate empty float64 placeholder arrays for concatenation.
        parts = []
        if n_norm:
            parts.append(self.memory.sample(n_norm))
        if n_exp:
            parts.append(self.expert.sample(n_exp))
        # Column-wise concatenation: (s, a, r, s_, done) across both sources.
        batch_s, batch_a, batch_r, batch_s_, batch_d = (
            np.concatenate(cols) for cols in zip(*parts))

        s = torch.tensor(batch_s, device=DEVICE, dtype=torch.float32)
        a = torch.tensor(batch_a, device=DEVICE, dtype=torch.long).unsqueeze(1)
        r = torch.tensor(batch_r, device=DEVICE, dtype=torch.float32).unsqueeze(1)
        s_ = torch.tensor(batch_s_, device=DEVICE, dtype=torch.float32)
        done = torch.tensor(batch_d, device=DEVICE, dtype=torch.bool).unsqueeze(1)

        # Standard DQN target: r + GAMMA * max_a' Q_tgt(s', a'),
        # zeroed for terminal transitions via (~done).
        q = self.q_net(s).gather(1, a)
        with torch.no_grad():
            q_next = self.tgt_net(s_).max(1, keepdim=True)[0]
            q_target = r + GAMMA * q_next * (~done)
        loss = F.mse_loss(q, q_target)

        self.optim.zero_grad()
        loss.backward()
        self.optim.step()

        self.steps += 1
        if self.steps % UPDATE_TARGET == 0:
            self.tgt_net.load_state_dict(self.q_net.state_dict())