import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import random
from Utils.tensor_ops import _2tensor
from collections import namedtuple
from Utils.tensor_ops import pt_inf
from torch.distributions.categorical import Categorical

# Replay-buffer record: one environment step (s, a, s', r, done).
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'done'))


class ReplayBuffer:
    """Fixed-capacity ring buffer of Transition tuples.

    Once `capacity` items are stored, each new push overwrites the
    oldest entry (circular write cursor).
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []      # grows until it reaches capacity, then is overwritten in place
        self.position = 0     # index of the next slot to write

    def push(self, *args):
        """Store one transition (fields in Transition order), evicting the oldest when full."""
        transition = Transition(*args)
        if len(self.memory) == self.capacity:
            self.memory[self.position] = transition
        else:
            self.memory.append(transition)
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` distinct transitions drawn uniformly at random."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored (<= capacity)."""
        return len(self.memory)



# Per-agent Q-network
class QNetwork(nn.Module):
    """MLP mapping an observation vector to one raw Q-value per action.

    Architecture: input_dim -> 512 -> 256 -> output_dim with ReLU
    activations on the two hidden layers and a linear output head.
    """

    def __init__(self, input_dim, output_dim):
        super(QNetwork, self).__init__()
        self.fc1 = nn.Linear(input_dim, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, output_dim)

    def forward(self, x):
        """Return un-normalised Q-values; shape (..., output_dim)."""
        hidden = self.fc1(x).relu()
        hidden = self.fc2(hidden).relu()
        return self.fc3(hidden)


# Multi-agent controller: one independent Q-network per agent.
# NOTE(review): despite the class name, no mixing network is present in this
# chunk — each agent is trained as an independent DQN.
class QMIX(nn.Module):
    def __init__(self, num_agents, state_dim, action_space, device):
        """Build per-agent Q-networks, per-network Adam optimizers and a replay buffer.

        Args:
            num_agents: number of cooperating agents.
            state_dim: per-agent observation dimension.
            action_space: number of discrete actions available to each agent.
            device: torch device tensors and networks are moved to.
        """
        super().__init__()
        self.num_agents = num_agents
        self.state_dim = state_dim
        self.action_space = action_space
        self.device = device
        self.num_thread = 0  # number of parallel env threads; set in select_action

        # One independent Q-network per agent.
        self.q_networks = torch.nn.ModuleList(
            modules=[QNetwork(self.state_dim, self.action_space) for _ in range(self.num_agents)])
        # Experience replay buffer.
        self.replay_buffer = ReplayBuffer(capacity=10000)

        # One optimizer per agent network (named dict kept for checkpointing callers).
        self.optimizers = [optim.Adam(q_net.parameters(), lr=0.001) for q_net in self.q_networks]
        self.optimizers_dict = {'optimizer' + str(index + 1): optimizer for index, optimizer in
                                enumerate(self.optimizers)}

    def select_action(self, states, test_mode, avail_act, epsilon=0.1):
        """Select one action per agent for every env thread.

        Args:
            states: array-like of shape (num_threads, num_agents * state_dim)
                (reshaped per thread to (num_agents, state_dim)).
            test_mode: when True, act greedily and disable exploration.
            avail_act: per-thread availability mask (>0 means available), or None.
            epsilon: exploration probability per agent per step.

        Returns:
            numpy array of chosen action indices, one row per env thread.

        Bug fixes vs. the previous version: `epsilon` was ignored (a hard-coded
        0.01 was used), exploration also fired in test mode, and a None
        `avail_act` crashed when building the mask despite being guarded later.
        """
        self.num_thread = states.shape[0]
        action_list = None
        for i in range(self.num_thread):
            state_thread = states[i].reshape(self.num_agents, self.state_dim)
            avail_act_thread = None
            if avail_act is not None:
                avail_act_thread = torch.tensor(avail_act[i], dtype=torch.int64).to(self.device)

            per_agent_q = []
            for q_network, state in zip(self.q_networks, state_thread):
                state_tensor = torch.tensor(state, dtype=torch.float32).to(self.device)
                # Replace NaNs so the network never sees them.
                state_tensor = torch.nan_to_num_(state_tensor, 0).to(self.device)

                if (not test_mode) and random.random() < epsilon:
                    # Epsilon-greedy exploration: emit a one-hot pseudo-Q row so
                    # the downstream masking/sampling machinery still applies
                    # (an unavailable random pick is masked out below).
                    action = random.randint(0, self.action_space - 1)
                    q_values = torch.zeros(self.action_space).to(self.device)
                    q_values[action] = 1
                else:
                    with torch.no_grad():
                        q_values = q_network(state_tensor).to(self.device)
                per_agent_q.append(q_values.unsqueeze(0))

            q_value = torch.cat(per_agent_q, dim=0)  # (num_agents, action_space)
            # Mask unavailable actions with -inf so they can never be chosen.
            if avail_act_thread is not None:
                q_value = torch.where(avail_act_thread > 0, q_value, -pt_inf()).to(self.device)

            act_dist = Categorical(logits=q_value.unsqueeze(0))
            if not test_mode:
                actions = act_dist.sample()          # stochastic (Boltzmann over Q-logits)
            else:
                actions = torch.argmax(act_dist.probs, dim=2)  # greedy

            action_list = actions if action_list is None else torch.cat((action_list, actions), 0)

        return action_list.cpu().numpy()

    def evaluate_actions(self, *args, **kargs):
        """Thin alias kept for the trainer's expected interface."""
        return self._update(*args, **kargs)

    def _update(self, state_batch, action_batch, avail_act_batch, next_state_batch, reward_batch, done_batch, gamma,
                max_grad_norm):
        """Run one TD update on every agent's Q-network.

        Args:
            state_batch / next_state_batch: tensors indexed as
                [batch, agent, ...]; the trailing dims are flattened per agent.
            action_batch: chosen action indices, [batch, agent].
            avail_act_batch: action availability masks, [batch, agent, action].
            reward_batch / done_batch: per-agent rewards and terminal flags;
                done is assumed to be a bool tensor (`~done` below) — TODO confirm.
            gamma: discount factor.
            max_grad_norm: gradient-clipping norm per agent network.

        Returns:
            float: summed per-agent losses normalised by batch_size * num_agents.
        """
        loss_list = []

        for i in range(self.num_agents):
            # Flatten this agent's observation to a vector per batch element.
            state = state_batch[:, i, :, :].reshape(state_batch.shape[0], -1)
            next_state = next_state_batch[:, i, :, :].reshape(next_state_batch.shape[0], -1)
            state = torch.nan_to_num_(state, 0)
            next_state = torch.nan_to_num_(next_state, 0)
            action = action_batch[:, i].to(torch.int64)
            avail_act = avail_act_batch[:, i, :]
            reward = reward_batch[:, i]
            done = done_batch[:, i]

            with torch.no_grad():
                q_values_next = self.q_networks[i](next_state)
                # Unavailable next actions must not contribute to the target max.
                if avail_act is not None:
                    q_values_next = torch.where(avail_act > 0, q_values_next, -pt_inf())
                q_values_next = q_values_next.max(1)[0]
                # TD target; `~done` zeroes the bootstrap term on terminal steps.
                q_targets = reward + gamma * q_values_next * ~done

            q_values = self.q_networks[i](state)
            loss = nn.functional.l1_loss(q_values.gather(1, action.unsqueeze(1)), q_targets.unsqueeze(1))
            loss_list.append(loss.item())

            self.optimizers[i].zero_grad()
            loss.backward()
            # Bug fix: clip only this agent's gradients. Clipping
            # self.parameters() also included stale gradients left over from the
            # previous agents' backward passes (only optimizer i was zeroed),
            # which distorted the global norm and the clipping scale.
            nn.utils.clip_grad_norm_(self.q_networks[i].parameters(), max_grad_norm)
            self.optimizers[i].step()

        mean_loss = np.array(loss_list).sum() / (state_batch.shape[0] * state_batch.shape[1])
        return mean_loss

    def store_transition(self, transition):
        """Unpack a (state, action, next_state, reward, done) tuple into the buffer."""
        self.replay_buffer.push(*transition)
