import torch
import torch.nn.functional as F
import torch.nn as nn
import csv
from envs.snake.snake import SnakeEnv
from envs.snake import setting
import collections
import random
import numpy as np
from tqdm import tqdm
import os
import copy

"""
bug：
1. csv写入文件的过程不能中断，否则文件无法正常写入
"""


class ReplayBuffer:
    """Experience replay pool backed by two FIFO queues.

    ``buffer`` receives every transition; ``longer_buffer`` receives only the
    transitions the caller marks as important (here: episodes where the snake
    length passed a threshold).  ``sample`` draws ``batch_size`` transitions
    from EACH queue, so one call yields ``2 * batch_size`` transitions.
    """

    def __init__(self, capacity):
        # deque(maxlen=...) gives FIFO eviction once capacity is reached.
        self.buffer = collections.deque(maxlen=capacity)
        self.longer_buffer = collections.deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Append one transition to the ordinary buffer."""
        self.buffer.append((state, action, reward, next_state, done))

    def add1(self, state, action, reward, next_state, done):
        """Alias of :meth:`add`, kept for backward compatibility with callers."""
        # Originally a copy-pasted duplicate of ``add``; delegate instead.
        self.add(state, action, reward, next_state, done)

    def add2(self, state, action, reward, next_state, done):
        """Append one transition to the "longer" (priority) buffer."""
        self.longer_buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Return ``2 * batch_size`` transitions, half from each queue.

        Raises ``ValueError`` (from ``random.sample``) if either queue holds
        fewer than ``batch_size`` transitions — callers gate on :meth:`size`.
        """
        transitions1 = random.sample(self.buffer, batch_size)
        transitions2 = random.sample(self.longer_buffer, batch_size)
        state, action, reward, next_state, done = zip(*(transitions1 + transitions2))
        return np.array(state), action, reward, np.array(next_state), done

    def size(self):
        # Deliberately reports the SLOWER-filling queue: training only starts
        # once ``longer_buffer`` (and therefore also ``buffer``) can satisfy
        # a ``sample`` call.
        return len(self.longer_buffer)


# class Qnet(torch.nn.Module):
#     def __init__(self, state_dim=0, hidden_dim=0, action_dim=0):
#         super(Qnet, self).__init__()
#         self.stack = nn.Sequential(
#             nn.Flatten(),
#             nn.Linear(state_dim, hidden_dim),
#             nn.ReLU(),
#             nn.Linear(hidden_dim, hidden_dim),
#             nn.ReLU()
#         )
#         self.fc_A = nn.Linear(hidden_dim, action_dim)
#         self.fc_V = nn.Linear(hidden_dim, 1)
#
#
#     def forward(self, x):
#         A = self.fc_A(self.stack(x))
#         V = self.fc_V(self.stack(x))
#         Q = V + A - A.mean(1).view(-1, 1)  # Q值由V值和A值计算得到
#         return Q


class Qnet(torch.nn.Module):
    """Q-network that splits the state into a 6-value header and a flattened map.

    NOTE(review): ``state_dim`` and ``action_dim`` are accepted only for
    interface compatibility and are ignored; the layer widths come from
    ``setting.max_size`` and hard-coded constants instead — confirm this is
    intentional before reusing the class elsewhere.
    """

    def __init__(self, state_dim=0, hidden_dim=1000, action_dim=0):
        super(Qnet, self).__init__()
        # Encoder for the flattened map (max_size * max_size inputs -> 10 features).
        self.stack1 = nn.Sequential(
            nn.Linear(setting.max_size * setting.max_size, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, 10),
            nn.ReLU()
        )
        # Head: 6 header values + 10 encoded map features = 16 inputs,
        # producing 4 outputs (one Q-value per action).
        self.stack2 = nn.Sequential(
            nn.Linear(16, 100),
            nn.ReLU(),
            nn.Linear(100, 4)
        )

    def forward(self, x):
        # Accepts array-likes as well as tensors; torch.Tensor() also casts
        # the input to float32.
        x = torch.Tensor(x)
        # Columns 0..5 are auxiliary features (presumably direction/food info
        # from SnakeEnv — TODO confirm); the remaining max_size*max_size
        # columns are the flattened game map.
        x1, x2 = torch.split(x, (6, setting.max_size * setting.max_size), dim=1)

        x2 = self.stack1(x2)

        x = torch.cat([x1, x2], dim=1)
        # print(x.shape)
        return self.stack2(x)


# net = Qnet()
# x = torch.rand(64, 406)
#
# print(net(x))


class DQN:
    ''' DQN算法,包括Double DQN '''
    def __init__(self,
                 state_dim,
                 hidden_dim,
                 action_dim,
                 learning_rate,
                 gamma,
                 epsilon,
                 target_update,
                 device,
                 dqn_type='VanillaDQN', init_type = 0, path=''):
        if init_type == 0:
            self.action_dim = action_dim
            self.q_net = Qnet(state_dim, hidden_dim, self.action_dim).to(device)
            self.target_q_net = Qnet(state_dim, hidden_dim,
                                     self.action_dim).to(device)
            self.optimizer = torch.optim.Adam(self.q_net.parameters(),
                                              lr=learning_rate)
            self.gamma = gamma
            self.epsilon = epsilon
            self.target_update = target_update
            self.count = 0
            self.dqn_type = dqn_type
            self.device = device
        else:
            self.action_dim = 4
            self.device = 'cuda'
            self.q_net = Qnet(406,512,4).to(self.device)
            self.q_net.load_state_dict(torch.load(path))
            self.epsilon = 0

    def take_action(self, state):
        if self.epsilon > 0.01:
            if (self.count + 1) % 2000 == 0:
                self.epsilon -= 0.01
        if np.random.random() < self.epsilon:
            action = np.random.randint(self.action_dim)
        else:
            state = torch.tensor([state], dtype=torch.float).to(self.device)
            action = self.q_net(state).argmax().item()
        return action

    """
    通过搜索，得到四个方向上未来的n步奖励，然后选取奖励最大的动作
    n_steps 默认为4，搜索空间为4**4
    """
    # def take_action_by_search(self, state,  n_steps=3):
    #     """
    #     states: 状态集合
    #     flag: 剩余寻找次数
    #     re: 总奖励
    #     """
    #     def dfs(states: list, flag: int, rs: int):
    #         if flag == 0:
    #             return rs
    #         temp_states = []
    #         for s in states:
    #             for j in range(temp_env.action_space.n):
    #                 temp_env.data_import(s)
    #                 next_s, r, d, _ = temp_env.step(j)
    #                 rs += r
    #                 # print(temp_env.body[0], d)
    #                 if not d:
    #                     # print(r)
    #                     temp_states.append(temp_env.data_export())
    #         return dfs(temp_states, flag-1, rs)
    #
    #     # 复制一个当前的环境
    #     temp_env = copy.deepcopy(env)
    #     # 得到四个方向的状态以及回报
    #     s_r = []
    #     s_s = []
    #     s_d = []
    #     for i in range(temp_env.action_space.n):
    #         temp_env.data_import(env.data_export())
    #         next_s, next_r, d, _ = temp_env.step(i)
    #         s_r.append(next_r)
    #         s_s.append(temp_env.data_export())
    #         s_d.append(d)
    #
    #     # print(s_r)
    #     for i in range(len(s_d)):
    #         if not s_d[i]:
    #             s_r[i] = dfs([s_s[i]], n_steps-1, s_r[i])
    #
    #     return np.argmax(s_r)

    def max_q_value(self, state):
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        return self.q_net(state).max().item()

    def save(self, epoch):
        if not os.path.exists('models'):
            os.mkdir('models')
        torch.save(self.q_net.state_dict(), 'models/dqn_local_search_' + str(epoch) + '.pth')

    def update(self, transition_dict):
        states = torch.tensor(transition_dict['states'],
                              dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions']).view(-1, 1).to(
            self.device)
        rewards = torch.tensor(transition_dict['rewards'],
                               dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'],
                                   dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'],
                             dtype=torch.float).view(-1, 1).to(self.device)

        q_values = self.q_net(states).gather(1, actions)  # Q值
        # 下个状态的最大Q值
        max_action = self.q_net(next_states).max(1)[1].view(-1, 1)
        max_next_q_values = self.target_q_net(next_states).gather(1, max_action)
        q_targets = rewards + self.gamma * max_next_q_values * (1 - dones)  # TD误差目标
        dqn_loss = torch.mean(F.mse_loss(q_values, q_targets))  # 均方误差损失函数
        self.optimizer.zero_grad()  # PyTorch中默认梯度会累积,这里需要显式将梯度置为0
        dqn_loss.backward()  # 反向传播更新参数
        self.optimizer.step()

        if self.count % self.target_update == 0:
            self.target_q_net.load_state_dict(
                self.q_net.state_dict())  # 更新目标网络
        self.count += 1

    # 导入一个模型
    def load_dict(self, path):
        self.q_net.load_state_dict(torch.load(path))



def main():
    """Train a DQN agent on SnakeEnv, logging episode returns to CSV and
    checkpointing the network every 1000 episodes.

    Fixes over the original version:
    * CSV log files keep an explicit handle, are flushed after every write
      and closed before rotation — previously ``csv.writer(open(...))``
      leaked the handle and an interrupted run lost all buffered rows (the
      bug noted at the top of the file).
    * ``logs/`` is created if missing, so the first ``open`` cannot fail.
    * The checkpoint load path uses a forward slash ('models\\...' only
      worked on Windows and '\\d' is an invalid escape sequence).
    """
    lr = 2e-3
    num_episodes = 300000
    hidden_dim = 512
    gamma = 0.95  # was 0.98
    epsilon = 0.1  # was 0.5
    target_update = 50  # was 20
    buffer_size = 20000
    minimal_size = 2000
    batch_size = 128
    max_step = 1999
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device(
        "cpu")
    print(device)
    env = SnakeEnv()
    # env.change_map()

    # Ordinary replay pool holding every transition.
    replay_buffer = ReplayBuffer(buffer_size)

    # Transitions with snake length >= threshold_buffer_longer also go into
    # the "longer" buffer, whose fill level gates when training starts.
    threshold_buffer_longer = 12
    state_dim = 6 + setting.max_size * setting.max_size
    action_dim = env.action_space.n
    agent = DQN(state_dim, hidden_dim, action_dim, lr, gamma, epsilon,
                target_update, device)
    # Forward slash keeps the path portable across OSes.
    agent.load_dict('models/dqn_local_search_10000.0.pth')
    return_list = []

    os.makedirs('logs', exist_ok=True)
    # Keep the file handle so rows can be flushed immediately and the file
    # closed on rotation; otherwise buffered rows are lost on interruption.
    log_file = open('logs/log_0.csv', 'w', newline='')
    log = csv.writer(log_file)

    num_iter = 300
    # How often (in episodes) the map layout changes.
    change_map_fre = 500
    for i in range(num_iter):
        with tqdm(total=int(num_episodes / num_iter), desc='Iteration %d' % i) as pbar:
            for i_episode in range(int(num_episodes / num_iter)):
                episode_return = 0
                # Periodically switch to a new map (also returns its step limit).
                if (num_episodes / num_iter * i + i_episode + 1) % change_map_fre == 0:
                    max_step = env.change_map()
                    # print(setting.height, setting.width)
                state = env.reset()
                done = False
                step = 0
                while not done and step < max_step:

                    step += 1
                    action = agent.take_action(state)

                    next_state, reward, done, l = env.step(action)

                    replay_buffer.add1(state, action, reward, next_state, done)

                    if l >= threshold_buffer_longer:
                        replay_buffer.add2(state, action, reward, next_state, done)

                    state = next_state
                    episode_return += reward
                    # Train only once the slower-filling buffer holds enough data.
                    if replay_buffer.size() > minimal_size:
                        b_s, b_a, b_r, b_ns, b_d = replay_buffer.sample(batch_size)
                        transition_dict = {
                            'states': b_s,
                            'actions': b_a,
                            'next_states': b_ns,
                            'rewards': b_r,
                            'dones': b_d
                        }
                        agent.update(transition_dict)

                return_list.append(episode_return)

                if (i_episode + 1) % 10 == 0:
                    if i_episode == int(num_episodes / num_iter) - 1:
                        pbar.set_postfix({
                            'episode':
                                '%d' % (num_episodes / num_iter * i + i_episode + 1),
                            'return':
                                '%.3f' % np.mean(return_list[-int(num_episodes / num_iter):])
                        })
                    else:
                        pbar.set_postfix({
                            'episode':
                                '%d' % (num_episodes / num_iter * i + i_episode + 1),
                            'return':
                                '%.3f' % np.mean(return_list[-10:])
                        })
                    # Flush right after writing so the row survives an
                    # interrupted run.
                    log.writerow([episode_return])
                    log_file.flush()
                if (num_episodes / num_iter * i + i_episode + 1) % 1000 == 0:
                    # Rotate the log: close the old handle first so its
                    # buffered rows reach disk.
                    log_file.close()
                    log_file = open(
                        'logs/log_' + str(num_episodes / num_iter * i + i_episode + 1) + '.csv',
                        'w', newline='')
                    log = csv.writer(log_file)
                    # NOTE: the episode index is a float (num_episodes/num_iter
                    # is true division), so file/checkpoint names keep a '.0'
                    # suffix — kept for compatibility with existing checkpoints.
                    agent.save(num_episodes / num_iter * i + i_episode + 1)
                pbar.update(1)
    log_file.close()


# Script entry point: run training when executed directly.
if __name__ == '__main__':
    main()

# Quick check of the search tree's reward lookahead:
# state = env.reset()
# agent.take_action_by_search(state)
