import copy
import random
from collections import deque
from pathlib import Path

import gymnasium as gym
from matplotlib import pyplot as plt

plt.switch_backend("agg")
import numpy as np
import torch
from torch import nn, optim
from torch.utils import tensorboard as tb
from tqdm import tqdm
from typing import *


class QNet(nn.Module):
    """Two-layer MLP mapping a state (optionally concatenated with a
    per-agent embedding) to one Q-value per action."""

    def __init__(self, state_dim: int, hidden_dim: int, action_dim: int) -> None:
        super().__init__()
        # Attribute names (fc1/nonlinear/fc2) are load-bearing: state_dict
        # keys are averaged by name elsewhere and stored in checkpoints.
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.nonlinear = nn.ReLU()
        self.fc2 = nn.Linear(hidden_dim, action_dim)

    def forward(self, state_tensor: torch.Tensor, embedding_tensor: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Compute Q-values for each action.

        Args:
            state_tensor: shape ``(state_dim,)`` or ``(..., state_dim)``.
            embedding_tensor: optional 1-D embedding appended to every state
                along the last dimension; tiled over any leading batch dims.

        Returns:
            Tensor of shape ``(..., action_dim)``.
        """
        if embedding_tensor is not None:
            # Tile the single embedding vector across the batch dimensions so
            # it can be concatenated with a batched state.
            if state_tensor.ndim > 1:
                embedding_tensor = embedding_tensor.repeat(*state_tensor.shape[:-1], 1)
            x = torch.cat((state_tensor, embedding_tensor), dim=-1)
        else:
            x = state_tensor
        x = self.fc1(x)
        x = self.nonlinear(x)
        x = self.fc2(x)
        return x


class ReplayBuffer:
    """Fixed-capacity FIFO experience replay buffer.

    NOTE: the ``capicity`` spelling is kept because other code in this file
    reads the attribute by that name.
    """

    def __init__(self, capicity: int) -> None:
        self.capicity = capicity
        # deque(maxlen=...) discards the oldest entry automatically when a
        # new one is appended at capacity.
        self.buffer = deque(maxlen=self.capicity)

    @property
    def size(self) -> int:
        """Current number of stored transitions."""
        return len(self.buffer)

    def push(self, s, a, r, next_s, t) -> None:
        """Append one (state, action, reward, next_state, terminated) transition.

        The original's explicit ``popleft`` when full was redundant: the
        deque's ``maxlen`` already evicts the oldest element on append.
        """
        self.buffer.append([s, a, r, next_s, t])

    def is_full(self) -> bool:
        """Whether the buffer has reached its capacity."""
        return self.size == self.capicity

    def sample(self, N: int, device: str = "cpu"):
        """Sample ``N`` transitions uniformly and stack them into tensors.

        Returns:
            Tuple of (states, actions, rewards, next_states, terminateds),
            each of shape ``(N, ...)``; actions are int64 for use with
            ``gather``, everything else float32.
        """
        assert N <= self.size, "batch is too big"
        samples = random.sample(self.buffer, N)
        states, actions, rewards, next_states, terminateds = zip(*samples)
        return (
            torch.from_numpy(np.vstack(states)).float().to(device),
            torch.from_numpy(np.vstack(actions)).type(torch.int64).to(device),
            torch.from_numpy(np.vstack(rewards)).float().to(device),
            torch.from_numpy(np.vstack(next_states)).float().to(device),
            torch.from_numpy(np.vstack(terminateds)).float().to(device),
        )


class DQN:
    """A single DQN agent bound to one gym environment.

    The agent owns its environment, replay buffer, online/target Q-networks
    and optimizer.  When ``embedding_init`` is supplied, a trainable
    per-agent embedding is concatenated to every state before it enters the
    Q-network (personalization in the federated setting).
    """

    def __init__(
        self,
        env: str,
        heter: float,
        env_index: int,
        embedding_init: np.ndarray = None,
        hidden_dim: int = 128,
        buffer_capicity: int = 10000,
        buffer_init_ratio: float = 0.30,
        gamma: float = 0.98,
        lr: float = 0.007,
        batch_size: int = 64,
        update_interval: int = 5,
        save_dir: str = None,
        save_interval: int = 100,
        train_batchs: int = None,
        device: str = "cpu",
    ) -> None:
        """
        Args:
            env: gym environment id; assumed to accept a ``heter`` keyword
                (custom registration) -- TODO confirm against the env code.
            heter: heterogeneity parameter forwarded to ``gym.make``.
            env_index: index of this point, used only in log tags.
            embedding_init: optional initial value for the trainable embedding.
            hidden_dim: hidden width of both Q-networks.
            buffer_capicity: replay-buffer capacity (spelling kept for
                backward compatibility).
            buffer_init_ratio: fraction of the buffer pre-filled with
                random-policy transitions before training, in (0, 1).
            gamma: discount factor.
            lr: Q-network learning rate.
            batch_size: SGD batch size.
            update_interval: train steps between target-network syncs.
            save_dir: output directory (required).
            save_interval: stored but not used in this class.
            train_batchs: total training batches, used to anneal epsilon.
            device: torch device string.
        """
        self.env = gym.make(env, heter=heter)
        self.env_index = env_index
        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = self.env.action_space.n
        self.replay_buffer = ReplayBuffer(buffer_capicity)
        self.gamma = gamma
        self.batch_size = batch_size
        self.update_interval = update_interval
        self.device = device
        if embedding_init is not None:
            self.embedding = torch.from_numpy(embedding_init).float().to(self.device).requires_grad_(True)
            input_dim = self.state_dim + self.embedding.shape[0]
        else:
            self.embedding = None
            input_dim = self.state_dim
        self.q_net = QNet(input_dim, hidden_dim, self.action_dim).to(self.device)
        self.q_target = QNet(input_dim, hidden_dim, self.action_dim).to(self.device)
        # BUGFIX: the original loaded q_target's state into itself (a no-op)
        # and only in the no-embedding branch, so the target net never
        # started in sync with the online net.  Copy from q_net in both
        # configurations.
        self.q_target.load_state_dict(self.q_net.state_dict())
        self.optimizer = optim.Adam([{"params": self.q_net.parameters(), "lr": lr}])
        if self.embedding is not None:
            # The embedding trains with its own (scaled) learning rate.
            self.optimizer.add_param_group({"params": self.embedding, "lr": lr / self.batch_size * 10})

        # Bookkeeping used during training.
        assert save_dir is not None, "save dir can't be empty"
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.save_interval = save_interval
        self.logger = tb.SummaryWriter(log_dir=self.save_dir / "log")
        self.n_train = 0
        self.global_train_batchs = train_batchs
        self.episode = 0
        self.episode_reward = 0
        self.episode_reward_list = []
        self.collect_exp_before_train(buffer_init_ratio)
        # BUGFIX: reset AFTER pre-filling the buffer so self.state matches
        # the environment's actual state (the original reset first, leaving
        # self.state stale once collection had stepped the env).
        self.state, _ = self.env.reset()

    def collect_exp_before_train(self, ratio: float):
        """Pre-fill the replay buffer with random-policy transitions until it
        holds ``ratio`` of its capacity."""
        assert 0 < ratio < 1.0, "ratio setting error!"
        state, _ = self.env.reset()
        while self.replay_buffer.size < ratio * self.replay_buffer.capicity:
            action = np.random.choice(self.action_dim)
            next_state, reward, terminated, truncated, info = self.env.step(action)
            self.replay_buffer.push(state, action, reward, next_state, terminated)
            # BUGFIX: also reset on truncation; the original kept stepping an
            # episode past its time limit.
            state = self.env.reset()[0] if (terminated or truncated) else next_state

    @torch.no_grad()
    def choose_action(self, state_tensor, epsilon: float = None):
        """Epsilon-greedy action selection; purely greedy when ``epsilon`` is None."""
        if epsilon is not None:
            assert 0.0 <= epsilon <= 1.0, "epsilon is wrong"
            if np.random.uniform() < epsilon:
                return np.random.choice(self.action_dim)
        state_tensor = state_tensor.to(self.device)
        return self.q_net(state_tensor, self.embedding).argmax().item()

    @staticmethod
    def get_epsilon(progress: float):
        """Hyperbolically decaying epsilon: 1.0 at progress 0, 0.01 at progress 1."""
        assert 0 <= progress <= 1.0
        return 1 / (99 * progress + 1)

    def train_one_batch(self):
        """Sample one batch from this agent's buffer and take one TD step.

        Returns:
            The scalar MSE TD-loss tensor.
        """
        states, actions, rewards, next_states, terminateds = self.replay_buffer.sample(
            self.batch_size, device=self.device
        )
        with torch.no_grad():
            # Standard DQN target: r + gamma * max_a' Q_target(s', a'),
            # with bootstrapping masked out on terminal transitions.
            td_targets = rewards + self.gamma * torch.max(
                self.q_target(next_states, self.embedding), dim=1, keepdim=True
            )[0] * (1 - terminateds)
        td_errors = td_targets - self.q_net(states, self.embedding).gather(1, actions)
        loss = torch.pow(td_errors, 2).mean()
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss

    def train(self, batch_nums: int):
        """Interact with the environment and train for ``batch_nums`` batches."""
        progress_wrapper = tqdm(range(batch_nums), leave=False, disable=True)
        for _ in progress_wrapper:
            epsilon = self.get_epsilon(self.n_train / self.global_train_batchs)
            action = self.choose_action(torch.from_numpy(self.state).float().to(self.device), epsilon)
            next_state, reward, terminated, truncated, info = self.env.step(action)
            self.episode_reward += reward
            self.replay_buffer.push(self.state, action, reward, next_state, terminated)
            if terminated or truncated:
                # Episode finished: reset the env and log the episode return.
                self.state, _ = self.env.reset()
                self.episode_reward_list.append(self.episode_reward)
                self.logger.add_scalar(f"reward/point-{self.env_index}", self.episode_reward, self.episode)
                self.episode += 1
                self.episode_reward = 0
            else:
                self.state = next_state

            loss = self.train_one_batch()
            self.n_train += 1
            # Periodically sync the target network with the online network.
            if self.n_train % self.update_interval == 0:
                self.q_target.load_state_dict(self.q_net.state_dict())
            # Record the training loss.
            self.logger.add_scalar(f"loss/point-{self.env_index}", loss, self.n_train)

class Server:
    """Federated server: aggregates and redistributes network parameters."""

    def __init__(self, points: List[DQN], device: str = "cpu") -> None:
        """To protect user privacy, nothing except neural-network parameters
        is ever read from the points."""
        self.points = points
        self.device = device
        # Server-side copies that hold the averaged parameters.
        self.avg_q_net = copy.deepcopy(self.points[0].q_net).to(self.device)
        self.avg_q_target = copy.deepcopy(self.points[0].q_target).to(self.device)

    def merge_params(self, merge_target: bool = False) -> None:
        """Average the points' parameters into the server nets, then push
        the averaged weights back to every point."""
        self._average_into(self.avg_q_net, [p.q_net for p in self.points])
        if merge_target is True:
            self._average_into(self.avg_q_target, [p.q_target for p in self.points])
        for p in self.points:
            p.q_net.load_state_dict(self.avg_q_net.state_dict())
            if merge_target is True:
                p.q_target.load_state_dict(self.avg_q_target.state_dict())

    @staticmethod
    def _average_into(dst_net, src_nets) -> None:
        # Element-wise mean of each named tensor across all source networks,
        # written in place into the destination network's tensors.
        for name, tensor in dst_net.state_dict().items():
            stacked = torch.stack([net.state_dict()[name] for net in src_nets])
            tensor.data.copy_(stacked.mean(dim=0).data)


class FedDQN:
    """Federated DQN trainer: several DQN points train locally and a Server
    periodically averages their Q-network parameters."""

    def __init__(
        self,
        point_configs: List[dict],
        merge_num: int,
        merge_interval: int,
        merge_target: bool,
        episode_num_eval: int,
        save_dir: str = None,
        device: str = "cpu",
    ) -> None:
        """
        Args:
            point_configs: one DQN kwargs-dict per federated point.
            merge_num: number of aggregation rounds.
            merge_interval: training batches per point between merges.
            merge_target: whether target networks are merged as well.
            episode_num_eval: evaluation episodes per point after each merge.
            save_dir: root output directory (required).
            device: torch device string, applied to every point.
        """
        assert save_dir is not None, "save_dir can't be empty"
        self.device = device
        self.point_configs = point_configs
        self.merge_num = merge_num
        self.merge_interval = merge_interval
        self.merge_target = merge_target
        self.episode_num_eval = episode_num_eval
        # BUGFIX: store a Path, not the raw string -- every later use
        # (`self.save_dir / ...`) relies on pathlib's `/` operator and
        # raises TypeError when callers pass a plain str.
        self.save_dir = Path(save_dir)

        self.points = [DQN(**{**c, "device": self.device}) for c in point_configs]
        self.server = Server(self.points, device=self.device)
        self.logger = tb.SummaryWriter(self.save_dir / "global" / "log")

    def train(self):
        """Run ``self.merge_num`` federated rounds: local training, merge,
        evaluate, checkpoint."""
        bar = tqdm(range(self.merge_num))
        for n in bar:
            for p in tqdm(self.points, leave=False, disable=True):
                p.train(self.merge_interval)
            self.server.merge_params(self.merge_target)
            avg_merge_episode_reward = self.evaluate_avg_reward()
            bar.set_description_str(f"reward->{int(avg_merge_episode_reward):3d}|")
            self.logger.add_scalar("aggregate/reward", avg_merge_episode_reward, global_step=n)
            self.save(self.save_dir / "server" / f"aggre_{n}.pt")
        self.summarize_point_reward()
        for p in self.points:
            p.logger.close()
        self.logger.close()

    def train_baseline(self):
        """Train every point independently (no merging) as a baseline, then
        cross-evaluate each point's policy on every other point's env.

        Returns:
            The mean of the cross-evaluation reward table.
        """
        batch_num = self.points[0].global_train_batchs
        for p in tqdm(self.points, desc="Training baseline..."):
            p.train(batch_num)
        self.summarize_point_reward()
        env_num = len(self.points)
        table = np.zeros((env_num, env_num))
        for i in tqdm(range(env_num), desc="Evaluating..."):
            for j in range(env_num):
                # Greedy policy of point i, evaluated on point j's env.
                point_r = 0
                s, _ = self.points[j].env.reset()
                while True:
                    a = self.points[i].choose_action(torch.from_numpy(s).float())
                    next_s, r, t1, t2, _ = self.points[j].env.step(a)
                    point_r += r
                    s = next_s
                    if t1 or t2:
                        break
                table[i][j] = point_r
        np.save(self.save_dir / "baseline.npy", table)
        for p in self.points:
            p.logger.close()
        self.logger.close()
        return table.mean()

    def evaluate_avg_reward(self):
        """Greedy-evaluate every point for ``self.episode_num_eval`` episodes
        and return the mean per-episode reward across points."""
        reward_list = []
        for p in self.points:
            # Evaluate on a deep copy so the training env's state is untouched.
            env = copy.deepcopy(p.env)
            point_r = 0
            for _ in range(self.episode_num_eval):
                s, _ = env.reset()
                while True:
                    a = p.choose_action(torch.from_numpy(s).float())
                    next_s, r, t1, t2, _ = env.step(a)
                    point_r += r
                    s = next_s
                    if t1 or t2:
                        break
            reward_list.append(point_r / self.episode_num_eval)
        return sum(reward_list) / len(reward_list)

    def summarize_point_reward(self):
        """Average each point's completed-episode rewards (truncated to the
        shortest list) and save the raw arrays plus a summary plot."""
        min_length = min([len(p.episode_reward_list) for p in self.points])
        table = []
        for p in self.points:
            table.append(p.episode_reward_list[:min_length])
            np.save(p.save_dir / "episode_reward_list.npy", np.array(p.episode_reward_list))
        avg_episode_reward = np.array(table).mean(0)
        plt.plot(range(min_length), avg_episode_reward)
        plt.grid()
        plt.title("average episode reward")
        # Ensure the output dir exists even when train() (which creates it
        # via its SummaryWriter) was not the caller.
        (self.save_dir / "global").mkdir(parents=True, exist_ok=True)
        plt.savefig(self.save_dir / "global" / "average_episode_reward.svg")
        plt.close()

    def save(self, save_path):
        """Save the aggregated Q-network weights plus every point's embedding."""
        save_path = Path(save_path)
        # BUGFIX: parents=True so nested checkpoint dirs are created on the
        # first save instead of raising FileNotFoundError.
        save_path.parent.mkdir(parents=True, exist_ok=True)
        params = {"weights": self.server.avg_q_net.state_dict()}
        for p in self.points:
            params[f"embedding_{p.env_index}"] = p.embedding
        torch.save(params, save_path)
