import os.path
import time
import random
import numpy as np

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

from .network import NatureCNN, NonLinearQNet
from .utils import ReplayBuffer, LinearSchedule, dict_to_np, prepare_obs


class Network(nn.Module):
    """Rainbow-style agent network.

    Composes a NatureCNN feature extractor (``backbone``) with a
    distributional noisy Q-head (``q_network``); the head's value support
    is configured from ``args`` (v_min / v_max / n_atoms).
    """

    def __init__(self, args, in_depth, action_shape):
        super().__init__()
        self.backbone = NatureCNN(in_depth)
        self.q_network = NonLinearQNet(
            self.backbone.out_features,
            args.q_hidden_features,
            action_shape,
            args.v_min,
            args.v_max,
            args.n_atoms,
            args.noisy_std,
            args.log_softmax,
        )

    def forward(self, x, action=None):
        """Return (actions, logits) from the Q-head over extracted features.

        When ``action`` is given, the head evaluates that action instead of
        selecting one (delegated entirely to ``q_network``).
        """
        return self.q_network(self.backbone(x), action)

    def reset_noise(self):
        """Resample the noisy-layer parameters of the Q-head."""
        self.q_network.reset_noise()

    def get_q_dist(self, x):
        """Return the full return-distribution over atoms for observation x."""
        return self.q_network.get_q_dist(self.backbone(x))


class Policy:
    """Independent per-agent Rainbow-style policy.

    Keeps one online network, one target network and one Adam optimizer per
    agent id, and implements greedy action selection plus the C51
    categorical distributional Bellman update.
    """

    def __init__(self, args, agents, observation_space, action_space, device):
        # args: hyper-parameter namespace (learning_rate, eps, gamma, n_steps,
        #       v_min/v_max/n_atoms, ...).
        # agents: iterable of agent ids, used as dict keys throughout.
        # observation_space / action_space: callables mapping agent id -> space
        #       (PettingZoo-style API).
        self.args = args
        self.device = device
        self.agents = agents
        self.q_policy = dict()
        self.q_target = dict()
        self.optimizer = dict()
        for agent in agents:
            # Fully independent online/target network pair per agent (IQL).
            self.q_policy[agent] = Network(args, observation_space(agent).shape[0], action_space(agent).n).to(
                self.device)
            self.q_target[agent] = Network(args, observation_space(agent).shape[0], action_space(agent).n).to(
                self.device)
            self.optimizer[agent] = optim.Adam(self.q_policy[agent].parameters(), lr=self.args.learning_rate,
                                               eps=self.args.eps, betas=(0.9, 0.999))
        # Initialize target weights to match the online weights.
        self.update_target()

    def select_action(self, obs):
        """Greedy per-agent action selection.

        `obs` is indexed as obs[:, agent_idx, :, :, :], i.e. a 5-D array with
        the agent axis at dim 1 (presumably (batch=1, agent, C, H, W) —
        `.item()` below implies a single-element batch; TODO confirm).
        Returns (dict of python-int actions keyed by agent, stacked np array).
        """
        dict_actions = dict()
        np_actions = []
        for agent_idx, agent in enumerate(self.agents):
            dict_actions[agent], _ = self.q_policy[agent](prepare_obs(obs[:, agent_idx, :, :, :]).to(self.device))
            dict_actions[agent] = dict_actions[agent].item()
            np_actions.append(dict_actions[agent])
        return dict_actions, np.stack(np_actions)

    def learn(self, data):
        """One gradient step per agent on a sampled batch.

        `data` is a dict from the replay buffer with per-agent slices along
        dim 1 of obs/next_obs/action/reward/done.
        Returns (scalar logs dict, summed per-sample losses across agents);
        the caller uses the latter as new PER priorities.
        NOTE(review): `data` is sampled with an importance-sampling beta, but
        no weights are applied to the loss here — verify whether PER
        importance weights are intentionally omitted.
        """
        logs = dict()
        bml = 0  # accumulates per-sample (un-meaned) losses over agents
        obs, next_obs, actions, rewards, dones = (
            data['obs'], data['next_obs'], data['action'], data['reward'], data['done'])
        for agent_idx, agent in enumerate(self.agents):
            loss_rainbow, q_values, before_mean_loss = self.learn_rainbow(agent,
                                                                          obs[:, agent_idx, :, :, :],
                                                                          next_obs[:, agent_idx, :, :, :],
                                                                          actions[:, agent_idx, :],
                                                                          rewards[:, agent_idx, :],
                                                                          dones[:, agent_idx, :])
            loss = loss_rainbow
            self.optimizer[agent].zero_grad()
            loss.backward()
            self.optimizer[agent].step()
            bml += before_mean_loss
            logs[agent + "/loss"] = loss.item()
            logs[agent + "/loss_rainbow"] = loss_rainbow.item()
            logs[agent + "/q_values"] = q_values
        return logs, bml

    def learn_rainbow(self, agent, obs, next_obs, actions, rewards, dones):
        """C51 categorical distributional update for one agent.

        Double-DQN style: next actions come from the ONLINE network, their
        value distribution from the TARGET network; the n-step Bellman-backed
        atoms are then projected onto the fixed support and a cross-entropy
        loss is taken against the online distribution of the taken actions.
        Returns (mean loss, mean Q-value scalar, per-sample loss np array).
        """
        with torch.no_grad():
            # Action selection by the online net (double Q-learning).
            next_actions, _ = self.q_policy[agent](next_obs)
            # Distribution of those actions under the target net.
            _, next_pmfs = self.q_target[agent](next_obs, next_actions)
            # _, next_pmfs = self.q_target(next_obs)
            # n-step backup of the support; `~dones` zeroes the bootstrap term
            # on terminal transitions (assumes `dones` is a bool tensor).
            next_atoms = rewards + pow(self.args.gamma, self.args.n_steps) * self.q_target[agent].q_network.atoms * (
                ~dones)
            # projection
            delta_z = self.q_target[agent].q_network.atoms[1] - self.q_target[agent].q_network.atoms[0]
            tz = next_atoms.clamp(self.args.v_min, self.args.v_max)

            # Fractional atom index of each backed-up value on the support.
            b = (tz - self.args.v_min) / delta_z
            l = b.floor().clamp(0, self.args.n_atoms - 1)
            u = b.ceil().clamp(0, self.args.n_atoms - 1)
            # (l == u).float() handles the case where bj is exactly an integer
            # example bj = 1, then the upper ceiling should be uj= 2, and lj= 1
            d_m_l = (u + (l == u).float() - b) * next_pmfs
            d_m_u = (b - l) * next_pmfs
            target_pmfs = torch.zeros_like(next_pmfs)
            # Scatter the split probability mass onto the neighbouring atoms,
            # one batch row at a time.
            for i in range(target_pmfs.size(0)):
                target_pmfs[i].index_add_(0, l[i].long(), d_m_l[i])
                target_pmfs[i].index_add_(0, u[i].long(), d_m_u[i])
        # Online distribution for the actions actually taken in the buffer.
        _, old_pmfs = self.q_policy[agent](obs, actions.squeeze(-1))
        # Cross-entropy between target and online distributions; clamp guards
        # log(0). NOTE(review): max=10 - 1e-5 is a no-op if old_pmfs are
        # probabilities (<= 1) — confirm the head's output normalization.
        before_mean_loss = (-(target_pmfs * old_pmfs.clamp(min=1e-5, max=10 - 1e-5).log()).sum(-1))
        loss = before_mean_loss.mean()
        # Expected Q-value under the online distribution (logging only).
        old_val = (old_pmfs * self.q_policy[agent].q_network.atoms).sum(1)
        return loss, old_val.mean().item(), before_mean_loss.detach().cpu().numpy()

    def update_target(self):
        """Hard-copy online weights into the target networks (all agents)."""
        for agent in self.agents:
            self.q_target[agent].load_state_dict(self.q_policy[agent].state_dict())

    def reset_noise(self):
        """Resample noisy-net noise in every agent's online network."""
        for agent in self.agents:
            self.q_policy[agent].reset_noise()

class IQN:
    """Trainer running independent Rainbow-style Q-learning for every agent
    in a PettingZoo-like parallel multi-agent environment.

    NOTE(review): despite the class name, the update in `Policy` is a
    categorical (C51-style) distributional loss, not implicit-quantile.
    """

    def __init__(self, args, env, eval_env, device):
        # `args` carries both path-related fields (save_path, model_list) and
        # an `algorithms` sub-namespace with the training hyper-parameters.
        self.paths = args
        self.model_list = self.paths.model_list
        self.args = args.algorithms
        self.env = env
        self.eval_env = eval_env
        self.device = device
        print(self.device)
        print("============================================================")

        self.writer = SummaryWriter(self.paths.save_path, flush_secs=2)
        self.policy = Policy(self.args, self.env.possible_agents, self.env.observation_space, self.env.action_space,
                             self.device)
        # Prioritized n-step replay buffer shared by all agents; observations
        # are stored stacked along a leading agent axis.
        self.buffer = ReplayBuffer(self.args.buffer_size,
                                   (len(self.env.possible_agents),) + self.env.obs_shape,
                                   len(self.env.possible_agents),
                                   len(self.env.possible_agents),
                                   self.args.gamma,
                                   self.args.alpha,
                                   self.args.n_steps,
                                   self.device)

    def eval(self):
        """Play one greedy episode in the evaluation environment.

        Returns:
            (per_agent_reward, total_reward): a (1, n_agents) tensor of
            episodic returns and its scalar sum.
        """
        obs, _ = self.eval_env.reset()
        obs = dict_to_np(obs)
        episode_reward = torch.zeros(1, len(self.eval_env.possible_agents))
        while True:
            actions, _ = self.policy.select_action(obs)
            next_obs, rewards, terminated, truncated, infos = self.eval_env.step(actions)
            obs = dict_to_np(next_obs)
            for agent_idx, agent in enumerate(self.eval_env.possible_agents):
                episode_reward[0, agent_idx] += rewards[agent]
            # Episode ends as soon as any agent terminates or truncates.
            if any(terminated[agent] or truncated[agent] for agent in self.eval_env.possible_agents):
                break
        return episode_reward, torch.sum(episode_reward)

    def run(self):
        """Main training loop.

        Collects transitions with epsilon-greedy exploration on top of the
        noisy-net policy, trains from prioritized replay every `train_freq`
        steps, periodically syncs target networks, and finally dumps the
        per-episode reward dicts to a CSV file.
        """
        per_beta_schedule = LinearSchedule(0, initial_value=self.args.beta_s, final_value=1.0,
                                           decay_time=self.args.max_steps)
        epsilon = LinearSchedule(0, initial_value=self.args.epsilon, final_value=0.001,
                                 decay_time=self.args.max_steps)
        global_step = 0
        episodes = 0  # number of completed episodes
        start_time = time.time()
        episode_reward = {agent: 0 for agent in self.env.possible_agents}
        obs, _ = self.env.reset()
        obs = dict_to_np(obs)
        ep_r = []  # one reward dict per finished episode, written to CSV below
        while True:
            # Annealed epsilon-greedy on top of the (noisy) greedy policy.
            if random.random() < epsilon(global_step):
                actions = {agent: self.env.action_space(agent).sample() for agent in self.env.agents}
                np_actions = dict_to_np(actions)
            else:
                actions, np_actions = self.policy.select_action(obs)
            next_obs, rewards, terminated, truncated, infos = self.env.step(actions)
            for agent in self.env.possible_agents:
                episode_reward[agent] += rewards[agent]
            next_obs = dict_to_np(next_obs)
            rewards = dict_to_np(rewards)
            done = np.stack([terminated[agent] or truncated[agent] for agent in self.env.possible_agents])
            self.buffer.add(obs, next_obs, np_actions, rewards, done)

            if done.any():
                print(f"steps: {global_step}, episodes: {episodes}, ", end="")
                for agent in self.env.possible_agents:
                    print(agent + ": " + str(episode_reward[agent]), end=", ")
                    self.writer.add_scalar(agent + "/episodic_reward", episode_reward[agent], global_step)
                # BUG FIX: record the episode's rewards BEFORE resetting the
                # accumulator — the old code appended the freshly zeroed dict,
                # so the CSV only ever contained zeros.
                ep_r.append(episode_reward)
                episode_reward = {agent: 0 for agent in self.env.possible_agents}
                obs, _ = self.env.reset()
                obs = dict_to_np(obs)
                episodes += 1
            else:
                obs = next_obs

            # Resample noisy-net noise on the same cadence as training.
            if global_step % self.args.train_freq == 0:
                self.policy.reset_noise()
            global_step += 1
            if global_step >= self.args.learning_starts:
                if global_step % self.args.train_freq == 0:
                    beta = per_beta_schedule(global_step)
                    self.writer.add_scalar("charts/beta", beta, global_step)
                    data = self.buffer.sample(self.args.batch_size, beta)
                    logs, um_loss = self.policy.learn(data)
                    # Per-sample losses (summed over agents) become the new
                    # PER priorities; the epsilon keeps priorities positive.
                    new_priorities = np.abs(um_loss) + 1e-6
                    self.buffer.update_priorities(data['indexes'], new_priorities)
                    if global_step % 100 == 0:
                        for k, v in logs.items():
                            self.writer.add_scalar(k, v, global_step)
                        self.writer.add_scalar("charts/SPS", int(global_step / (time.time() - start_time)), global_step)
                if global_step % self.args.target_network_freq == 0:
                    self.policy.update_target()
            if episodes > 200:
                break

        # Persist the per-episode rewards collected above.
        folder_path = './iql/env'
        file_name = 'episode_reward.csv'
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        file_path = os.path.join(folder_path, file_name)
        with open(file_path, 'w') as file:
            for item in ep_r:
                file.write(str(item) + '\n')
        print(f"数据已保存至文件: {file_path}")

        # TODO: add a proper evaluation/checkpoint phase here — `eval()` above
        # is available; the previously disabled eval block was removed as dead
        # code.