import os.path
import random
import time

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

from .network import NatureCNN, NonLinearQNet
from .utils import ReplayBuffer, LinearSchedule, prepare_obs, dict_to_np, RandomShiftsAug


class Network(nn.Module):
    """Per-agent value network: a CNN feature extractor feeding a distributional Q-head.

    Args:
        args: config namespace; must provide ``q_hidden_features``, ``v_min``,
            ``v_max``, ``n_atoms``, ``noisy_std`` and ``log_softmax`` for the head.
        in_depth: number of input channels for the CNN backbone.
        action_shape: number of discrete actions.
    """

    def __init__(self, args, in_depth, action_shape):
        super().__init__()
        self.backbone = NatureCNN(in_depth)
        self.q_network = NonLinearQNet(self.backbone.out_features, args.q_hidden_features, action_shape,
                                       args.v_min, args.v_max, args.n_atoms, args.noisy_std, args.log_softmax)

    def forward(self, x, action=None):
        """Return ``(actions, qvalues)`` for observation batch ``x``.

        When ``action`` is None (now the default, so ``net(x)`` works) the
        greedy action and its Q-value are returned; otherwise the Q-value of
        the supplied ``action`` is evaluated.
        """
        features = self.backbone(x)
        # Fixed: identity comparison (`is None`) instead of `== None`, which
        # is both unidiomatic and ambiguous for tensor arguments.
        if action is None:
            qvalues, actions = self.q_network.get_q_max_action(features)
        else:
            actions = action
            qvalues = self.q_network.get_q_action(features, action)
        return actions, qvalues

    def reset_noise(self):
        """Re-sample the noise of the noisy layers in the Q-head."""
        self.q_network.reset_noise()

    def get_q_dist(self, x):
        """Return the categorical Q distribution for observation batch ``x``."""
        features = self.backbone(x)
        return self.q_network.get_q_dist(features)


class QMixNetwork(nn.Module):
    """Small mixing MLP: maps the vector of per-agent Q-values to one joint Q_tot.

    Args:
        input_size: number of agents (length of the per-agent Q vector).
        hidden_size: width of the single hidden layer.
        output_size: output dimension (1 for a scalar Q_tot).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(QMixNetwork, self).__init__()
        self.linear1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.linear2 = nn.Linear(hidden_size, output_size)

    def forward(self, agent_qs):
        """Mix per-agent Q-values into the joint value Q_tot."""
        hidden = self.relu(self.linear1(agent_qs))
        return self.linear2(hidden)

class Policy:
    """QMIX policy wrapper: per-agent Q-networks plus a mixing network.

    Owns the online and target agent networks, the mixer, and one Adam
    optimizer over both the agent Q-network and mixer parameters.
    """

    def __init__(self, args, agents, in_depth, action_space, device):
        self.args = args
        self.agents = agents
        self.action_shape = action_space.n
        self.device = device
        self.q_policy = Network(args, in_depth, self.action_shape).to(self.device)
        self.q_target = Network(args, in_depth, self.action_shape).to(self.device)
        # Fix: the mixer must live on the same device as the agent networks,
        # otherwise mixing GPU agent Q-values fails with a device mismatch.
        self.qmixer = QMixNetwork(len(agents), 10, 1).to(self.device)

        self.update_target()
        # Optimize the agent Q-network parameters together with the mixer parameters.
        self.params = list(self.q_policy.parameters()) + list(self.qmixer.parameters())
        self.optimizer = optim.Adam(self.params, lr=self.args.learning_rate,
                                    eps=self.args.eps, betas=(0.9, 0.999))

    def select_action(self, obs, action=None):
        """Pick an action per agent with the online network.

        Returns:
            (dict mapping agent name -> int action, np.ndarray of those actions).
        """
        dict_actions = dict()
        np_actions = []
        for agent_idx, agent in enumerate(self.agents):
            # obs is assumed to be (batch, n_agents, C, H, W) -- TODO confirm against caller.
            dict_actions[agent], _ = self.q_policy(prepare_obs(obs[:, agent_idx, :, :, :]).to(self.device), action)
            dict_actions[agent] = dict_actions[agent].item()
            np_actions.append(dict_actions[agent])
        return dict_actions, np.stack(np_actions)

    def learn(self, data):
        """Take one gradient step on the QMIX TD loss; return the loss tensor."""
        loss = self.compute_loss(data)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss

    def agent_qANDnext_agent_q(self, obs, next_obs, actions):
        """Return Q(s, a) from the online net and max_a' Q_target(s', a') from the target net."""
        _, agent_q = self.q_policy(obs, actions)
        _, agent_q_next = self.q_target(next_obs, action=None)
        return agent_q, agent_q_next

    def compute_loss(self, data):
        """MSE TD loss on the mixed joint value Q_tot.

        target = sum_of_rewards + gamma * Q_tot_target(s') * (1 - done)
        """
        obs, next_obs, actions, rewards, dones = (
            data['obs'], data['next_obs'], data['action'], data['reward'], data['done'])
        # An episode step counts as terminal if ANY agent is done.
        dones = torch.any(dones, dim=1)

        # Per-agent Q columns built on the training device so GPU network
        # outputs can be written into them (original used CPU zeros).
        agent_qs = torch.zeros(obs.shape[0], obs.shape[1], device=self.device)
        agent_qs_next = torch.zeros_like(agent_qs)
        for agent_idx, agent in enumerate(self.agents):
            agent_qs[:, agent_idx], agent_qs_next[:, agent_idx] = self.agent_qANDnext_agent_q(
                obs[:, agent_idx, :, :, :],
                next_obs[:, agent_idx, :, :, :],
                actions[:, agent_idx, :])
        # Squeeze the trailing mixer output dim so q_tot and the target are both
        # (batch,); the original (batch, 1) vs (batch,) pair broadcast to
        # (batch, batch) inside the MSE.
        q_tot = self.qmixer(agent_qs).squeeze(-1)
        with torch.no_grad():
            # Two fixes: bootstrap only on NON-terminal transitions -- the
            # original multiplied by `dones`, inverting the mask (compare
            # learn_rainbow's `* (~dones)`) -- and keep the whole target out of
            # the computation graph.
            q_tot_next = self.qmixer(agent_qs_next).squeeze(-1)
            target = (torch.sum(rewards, dim=1).to(self.device)
                      + self.args.gamma * q_tot_next * (~dones).float())
        return F.mse_loss(q_tot, target)

    def learn_rainbow(self, data):
        """Distributional (C51-style) loss with Double-DQN action selection.

        Returns (loss, mean expected Q, per-sample loss as a numpy array).
        """
        obs, next_obs, actions, rewards, dones = (
            data['obs'], data['next_obs'], data['action'], data['reward'], data['done'])
        with torch.no_grad():
            # Double DQN: the online net picks a', the target net evaluates
            # its distribution. Fix: `forward` requires the action argument,
            # so pass None explicitly for greedy selection.
            next_actions, _ = self.q_policy(next_obs, None)
            _, next_pmfs = self.q_target(next_obs, next_actions)
            # n-step discounted support shifted by the reward; terminal states
            # keep only the reward.
            next_atoms = rewards + pow(self.args.gamma, self.args.n_steps) * self.q_target.q_network.atoms * (~dones)
            # Project the shifted atoms back onto the fixed support [v_min, v_max].
            delta_z = self.q_target.q_network.atoms[1] - self.q_target.q_network.atoms[0]
            tz = next_atoms.clamp(self.args.v_min, self.args.v_max)

            b = (tz - self.args.v_min) / delta_z
            l = b.floor().clamp(0, self.args.n_atoms - 1)
            u = b.ceil().clamp(0, self.args.n_atoms - 1)
            # (l == u).float() handles the case where b is exactly an integer
            # (e.g. b = 1 gives l = u = 1; the correction keeps the mass there).
            d_m_l = (u + (l == u).float() - b) * next_pmfs
            d_m_u = (b - l) * next_pmfs
            target_pmfs = torch.zeros_like(next_pmfs)
            for i in range(target_pmfs.size(0)):
                target_pmfs[i].index_add_(0, l[i].long(), d_m_l[i])
                target_pmfs[i].index_add_(0, u[i].long(), d_m_u[i])
        _, old_pmfs = self.q_policy(obs, actions.squeeze(-1))
        # Cross-entropy between projected target and predicted distributions;
        # clamp avoids log(0).
        before_mean_loss = (-(target_pmfs * old_pmfs.clamp(min=1e-5, max=10 - 1e-5).log()).sum(-1))
        loss = before_mean_loss.mean()
        old_val = (old_pmfs * self.q_policy.q_network.atoms).sum(1)
        return loss, old_val.mean().item(), before_mean_loss.detach().cpu().numpy()

    def update_target(self):
        """Hard-copy the online network weights into the target network."""
        self.q_target.load_state_dict(self.q_policy.state_dict())

    def reset_noise(self):
        """Re-sample noisy-layer noise in the online network."""
        self.q_policy.reset_noise()

    def save_model(self, save_path):
        """Save the online agent network weights (the mixer is not saved)."""
        torch.save(self.q_policy.state_dict(), save_path)

    def load_model(self, model_path):
        """Load online agent network weights saved by `save_model`."""
        self.q_policy.load_state_dict(torch.load(model_path))


class Qmix:
    """Top-level QMIX training driver.

    Owns the training/eval environments, the `Policy`, the replay buffer and a
    TensorBoard writer, and runs the main collect/train loop in `run()`.
    """

    def __init__(self, args, envs, eval_env, device):
        # `args` doubles as the path config; the algorithm hyperparameters
        # live under `args.algorithms`.
        self.paths = args
        self.args = args.algorithms
        self.model_list = self.paths.model_list
        self.envs = envs
        self.eval_env = eval_env
        self.device = device
        '''
        print(self.envs.single_observation_space)
        print(self.envs.single_action_space)
        print(self.device)
        '''

        print("============================================================")

        self.writer = SummaryWriter(self.paths.save_path, flush_secs=2)
        # All agents are assumed to share one action space -- TODO confirm.
        agent0 = self.envs.possible_agents[0]
        self.policy = Policy(self.args, self.envs.possible_agents, self.envs.obs_shape[0], self.envs.action_space(agent0),
                             self.device)
        self.buffer = ReplayBuffer(self.args.buffer_size, (len(self.envs.possible_agents),) + self.envs.obs_shape,
                                   len(self.envs.possible_agents),
                                   len(self.envs.possible_agents),
                                   self.args.gamma,
                                   self.args.alpha,
                                   self.args.n_steps,
                                   self.device)

    def eval(self):
        """Run one greedy episode in the eval env; return the reward summed over all agents."""
        obs, _ = self.eval_env.reset()
        obs = dict_to_np(obs)
        episode_reward = 0
        while True:
            actions, _ = self.policy.select_action(obs)
            next_obs, rewards, terminated, truncated, infos = self.eval_env.step(actions)
            obs = dict_to_np(next_obs)
            # NOTE(review): iterates self.envs.possible_agents while stepping
            # self.eval_env -- assumes both envs share the same agent set; verify.
            for agent in self.envs.possible_agents:
                episode_reward += rewards[agent]

            done = []
            for agent in self.envs.possible_agents:
                done.append(terminated[agent] or truncated[agent])
            # Stop as soon as any agent terminates or truncates.
            if True in done:
                break
        return episode_reward


    def run(self):
        """Main training loop.

        Collects transitions with epsilon-greedy exploration, trains every
        `train_freq` steps after `learning_starts`, hard-syncs the target
        network every `target_network_freq` steps, and finally dumps the
        per-episode return history to a CSV file.
        """
        # Linear schedule for the PER importance-sampling exponent beta.
        per_beta_schedule = LinearSchedule(0, initial_value=self.args.beta_s, final_value=1.0,
                                           decay_time=self.args.max_steps)
        # Linear epsilon decay for exploration.
        epsilon = LinearSchedule(0, initial_value=self.args.epsilon, final_value=0.001,
                                 decay_time=self.args.max_steps)
        global_step = 0
        eval_times = 0
        training = True
        episodes = 0  # number of completed episodes collected so far
        start_time = time.time()
        # Per-agent cumulative reward for the current episode, keyed by agent name.
        episode_reward = {agent: 0 for agent in self.envs.possible_agents}
        ep_sum = 0  # current episode's reward summed over all agents
        obs, _ = self.envs.reset()
        obs = dict_to_np(obs)
        ep_r=[]  # history of per-episode summed rewards (written to CSV at the end)
        while training:
            # Epsilon-greedy: random joint action with prob epsilon, else greedy policy.
            if random.random() < epsilon(global_step):
                actions = {agent: self.envs.action_space(agent).sample() for agent in self.envs.agents}
                np_actions = dict_to_np(actions)
            else:
                actions, np_actions = self.policy.select_action(obs, action=None)
            next_obs, rewards, terminated, truncated, infos = self.envs.step(actions)
            for agent in self.envs.possible_agents:
                episode_reward[agent] += rewards[agent]
                ep_sum += rewards[agent]

            next_obs = dict_to_np(next_obs)
            rewards = dict_to_np(rewards)
            done = []
            for agent in self.envs.possible_agents:
                done.append(terminated[agent] or truncated[agent])
            done = np.stack(done)
            self.buffer.add(obs, next_obs, np_actions, rewards, done)

            # Episode boundary: log, reset accumulators and the environment.
            if True in done:
                print(f"steps: {global_step}, episodes: {episodes}, ", end="")  # progress line
                for agent in self.envs.possible_agents:
                    print(agent + ": " + str(episode_reward[agent]), end=", ")
                    self.writer.add_scalar(agent + "/episodic_reward", episode_reward[agent], global_step)
                print(ep_sum)
                ep_r.append(ep_sum)
                self.writer.add_scalar("episode_reward_sum", ep_sum, global_step)
                episode_reward = {agent: 0 for agent in self.envs.possible_agents}
                ep_sum = 0
                obs, _ = self.envs.reset()
                obs = dict_to_np(obs)
                episodes += 1  # one more episode finished
            else:
                obs = next_obs

            # if global_step % self.args.train_freq == 0:
               # self.policy.reset_noise()
            global_step += 1

            # Train only after the buffer has warmed up.
            if global_step >= self.args.learning_starts:
                if global_step % self.args.train_freq == 0:
                    beta = per_beta_schedule(global_step)
                    self.writer.add_scalar("charts/beta", beta, global_step)
                    data = self.buffer.sample(self.args.batch_size, beta)
                    loss = self.policy.learn(data)
                    self.writer.add_scalar('Loss', loss, global_step)
                    # logs, um_loss = self.policy.learn(data)
                    # new_priorities = np.abs(um_loss) + 1e-6
                    # Update replay buffer priorities
                    # self.buffer.update_priorities(data['indexes'], new_priorities)
                    ''''
                    if global_step % 100 == 0:
                        for k, v in logs.items():
                            self.writer.add_scalar(k, v, global_step)
                        self.writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)
                        '''
                # Periodic hard update of the target network.
                if global_step % self.args.target_network_freq == 0:
                    self.policy.update_target()
                    print(global_step)
            # Stop on either a step budget or a hard episode cap.
            if global_step > self.args.max_steps:
                break
            if episodes > 200:
                break
        folder_path = f'./qmix/env'  # output folder for the episode-reward CSV
        file_name = f'episode_reward.csv'  # output file name

                # Create the folder if it does not exist yet.
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)

                # Build the full output path.
        file_path = os.path.join(folder_path, file_name)

                # Write one episode return per line.
        with open(file_path, 'w') as file:
            for item in ep_r:
                file.write(str(item) + '\n')

        print(f"数据已保存至文件: {file_path}")