import random
import cv2
import gym
import numpy as np
import time
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import torch.nn as nn
from matplotlib import animation

from PIL import Image
from replay_memory import ReplayMemory
from collections import namedtuple
import copy

# One replay-buffer record: (state, action, reward, next_state, done), as
# pushed by Agent.train and sampled back out in Agent._apply_gradient_descent.
DQNTransition = namedtuple('DQNTransition', ['state', 'action', 'reward', 'next_state', 'done'])


class Agent(object):
    """Double/Dueling DQN agent for image-based gym environments.

    Observations are cropped, grayscaled, resized and stacked into a
    4-channel frame history before being fed to a DuelCNN Q-network.
    Training uses a replay buffer, epsilon-greedy exploration with
    exponential decay, and a periodically-synced target network
    (Double-DQN target: policy net selects the action, target net
    evaluates it).
    """

    def __init__(self,
                 env: gym.core.Env,
                 policy_network: torch.nn.Module = None):
        """
        Args:
            env: gym environment with an image observation space
                 (assumes shape (H, W, C), e.g. (210, 160, 3) — TODO confirm
                 for non-Atari envs).
            policy_network: optional pre-built Q-network; a DuelCNN is
                 created when omitted.
        """
        # Image pre-process params
        self.target_h = 80  # Height after preprocessing
        self.target_w = 64  # Width after preprocessing
        # Raw observation sizes, used for cropping below
        self.state_size_h = env.observation_space.shape[0]
        self.state_size_w = env.observation_space.shape[1]
        self.state_size_c = env.observation_space.shape[2]
        # Cut 20 px from the top to get rid of the score table
        self.crop_dim = [20, self.state_size_h, 0, self.state_size_w]

        self.env = env
        self.n_actions = env.action_space.n
        self.observation_shape = env.observation_space.shape

        self.policy_network = policy_network or self._build_policy_network()
        self.target_network = copy.deepcopy(self.policy_network)
        self.target_network.load_state_dict(self.policy_network.state_dict())
        self.target_network.eval()  # target net is inference-only
        self.optimizer = torch.optim.RMSprop(self.policy_network.parameters())

        self.episode_reward_list = []  # per-episode total reward (for plots)
        self.episode_loss_list = []    # per-episode mean loss (for plots)
        self.loss = 0.0                # last minibatch loss, as a float

    def _build_policy_network(self):
        """Create the default dueling CNN Q-network."""
        return DuelCNN(self.target_h, self.target_w, self.n_actions)

    def save(self, path: str = "./model/", **kwargs):
        """Save the policy network weights to ``path + 'opt.pt'``.

        Fix: the old code saved ``self.policy_network.model.state_dict()``,
        but DuelCNN has no ``model`` attribute, so save() always raised
        AttributeError.
        """
        torch.save(self.policy_network.state_dict(), path + "opt.pt")

    def restore(self, filename: str):
        """Load policy weights from ``filename`` and sync the target network.

        Fix: same nonexistent ``.model`` attribute as in save().
        """
        self.policy_network.load_state_dict(torch.load(filename))
        self.target_network.load_state_dict(self.policy_network.state_dict())

    def get_action(self, state: torch.Tensor, **kwargs) -> int:
        """Epsilon-greedy action selection.

        With probability ``threshold`` (kwarg, default 0) a uniformly random
        action is returned; otherwise the greedy action from the policy net.
        """
        threshold: float = kwargs.get('threshold', 0)
        if random.random() < threshold:
            return int(np.random.choice(self.n_actions))
        return int(self.predict(state))

    def predict(self, obs):
        """Return the greedy action for a single (un-batched) observation."""
        with torch.no_grad():  # inference only — no autograd bookkeeping
            q_values = self.policy_network(obs.unsqueeze(0))
        return torch.argmax(q_values).item()

    def _apply_gradient_descent(self, memory, batch_size: int, learning_rate: float, discount_factor: float,
                                device: str = "cpu"):
        """One gradient step on a minibatch sampled from the replay buffer.

        Fixes over the original:
        - ``learning_rate`` was accepted but never applied to the optimizer.
        - The stored ``done`` flags were ignored, so the Bellman target
          bootstrapped past terminal states; now ``(1 - done)`` masks them.
        - The target-network forward pass ran twice; the unused
          ``non_final_mask`` machinery is removed (next_state is never None
          in this buffer).
        - Target-side computation runs under ``no_grad``.
        - ``self.loss`` now stores a plain float instead of the loss tensor
          (which kept the whole autograd graph alive).
        """
        if len(memory) < batch_size:
            return

        # Apply the requested learning rate (default matches RMSprop's 0.01).
        for group in self.optimizer.param_groups:
            group['lr'] = learning_rate

        transitions = memory.sample(batch_size)
        # Transpose the batch: list of DQNTransition -> DQNTransition of lists
        batch = DQNTransition(*zip(*transitions))

        state_batch = torch.stack(batch.state).to(device)
        action_batch = torch.tensor(batch.action, dtype=torch.int64, device=device)
        reward_batch = torch.tensor(batch.reward, dtype=torch.float, device=device)
        next_state_batch = torch.stack(batch.next_state).to(device)
        done_batch = torch.tensor(batch.done, dtype=torch.float, device=device)

        # Q(s_t, a_t) for the actions actually taken
        state_q_values = self.policy_network(state_batch)
        selected_q_value = state_q_values.gather(1, action_batch.unsqueeze(1)).squeeze(1)

        with torch.no_grad():
            # Double DQN: policy net picks argmax action, target net evaluates it
            next_actions = self.policy_network(next_state_batch).max(1)[1]
            next_q_value = self.target_network(next_state_batch).gather(
                1, next_actions.unsqueeze(1)).squeeze(1)

        # Bellman target; (1 - done) zeroes the bootstrap on terminal transitions
        expected_q_value = reward_batch + discount_factor * next_q_value * (1.0 - done_batch)

        loss = (selected_q_value - expected_q_value).pow(2).mean()

        # Optimize the model
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.loss = loss.item()

    def preProcess(self, image):
        """Crop, resize, grayscale and normalize one raw frame.

        Returns a float array of shape (target_h, target_w) in [0, 1].

        Fix: the old ``reshape(target_w, target_h)`` reinterpreted the
        (target_h, target_w) resized buffer with swapped dimensions,
        scrambling the pixel layout; cv2.resize already returns the
        desired shape (its dsize argument is (width, height)).
        """
        frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # to grayscale
        frame = frame[self.crop_dim[0]:self.crop_dim[1], self.crop_dim[2]:self.crop_dim[3]]  # cut 20 px from top
        frame = cv2.resize(frame, (self.target_w, self.target_h))  # dsize=(w, h) -> shape (h, w)
        return frame / 255  # normalize to [0, 1]

    def train(self,
              n_steps: int = 1000,
              n_episodes: int = 10,
              save_every: int = None,
              save_path: str = 'agent/',
              device: str = "cpu",
              callback: callable = None,
              **kwargs) -> float:
        """Run the training loop.

        Args:
            n_steps: max environment steps per episode (0/None = unlimited).
            n_episodes: number of episodes to run.
            save_every: checkpoint the model every N episodes (None = never).
            save_path: directory prefix passed to save().
            device: torch device string for the learning step.
            callback: accepted for API compatibility; currently unused.
        Kwargs: batch_size, memory_capacity, discount_factor, learning_rate,
            eps_start/eps_end/eps_decay_steps (exploration schedule),
            update_target_every (steps between target-net syncs),
            render_interval (accepted; currently unused).

        Fixes over the original:
        - The 4-frame state stack was rebuilt from four IDENTICAL frames on
          every step (``state`` was reset to the raw observation), so the
          network never saw real frame history. The stack now slides:
          newest preprocessed frame + the three most recent ones.
        - ``update_target_every`` was read but never used; the target network
          was instead synced on the ``save_every`` checkpoint — which never
          actually saved anything. Target syncs now happen every
          ``update_target_every`` steps and checkpoints call save().
        """
        batch_size: int = kwargs.get('batch_size', 256)
        memory_capacity: int = kwargs.get('memory_capacity', n_steps * 10)
        discount_factor: float = kwargs.get('discount_factor', 0.95)
        learning_rate: float = kwargs.get('learning_rate', 0.01)
        eps_start: float = kwargs.get('eps_start', 0.9)
        eps_end: float = kwargs.get('eps_end', 0.05)
        eps_decay_steps: int = kwargs.get('eps_decay_steps', n_steps)
        update_target_every: int = kwargs.get('update_target_every', 1000)
        render_interval: int = kwargs.get('render_interval',
                                          n_steps // 10)  # NOTE(review): accepted but unused

        memory = ReplayMemory(memory_capacity, transition_type=DQNTransition)
        total_steps_done = 0
        total_reward = 0

        for i_episode in range(n_episodes):
            # Initialize the environment and the initial stacked state
            obs, _ = self.env.reset()
            frame = self.preProcess(obs)
            # First state: the same frame stacked 4 times (no history yet)
            state = torch.tensor(np.stack((frame, frame, frame, frame)), dtype=torch.float)
            done = False
            steps_done = 0
            episode_reward = 0
            avg_loss = 0

            while not done:
                # Exponentially-decayed exploration rate
                threshold = eps_end + (eps_start - eps_end) * np.exp(-total_steps_done / eps_decay_steps)
                action = self.get_action(state, threshold=threshold)
                next_obs, reward, done, _, info = self.env.step(action)

                # Slide the 4-frame window: newest frame + 3 most recent
                next_frame = self.preProcess(next_obs)
                next_state = torch.tensor(
                    np.stack((next_frame, state[0], state[1], state[2])),
                    dtype=torch.float)
                memory.push(state, action, reward, next_state, done)
                # Move to the next (stacked) state — NOT the raw observation
                state = next_state

                episode_reward += reward
                steps_done += 1
                total_steps_done += 1
                self._apply_gradient_descent(memory, batch_size, learning_rate,
                                             discount_factor, device)
                avg_loss += float(self.loss)

                # Periodically copy policy weights into the target network
                if update_target_every and total_steps_done % update_target_every == 0:
                    self.target_network.load_state_dict(self.policy_network.state_dict())

                if n_steps and steps_done >= n_steps:
                    done = True

            avg_loss = avg_loss / max(steps_done, 1)  # guard against 0-step episodes
            self.episode_reward_list.append(episode_reward)
            self.episode_loss_list.append(avg_loss)
            self.plot_durations()
            self.plot_loss()
            total_reward += episode_reward
            print(f"episode{i_episode} total_reward{total_reward} episode_reward:{episode_reward} episode_avgloss:{avg_loss}")
            # Checkpoint the model (previously this branch only synced the
            # target network and never saved)
            if save_every and i_episode % save_every == 0:
                self.save(save_path)

    # Plotting
    def plot_durations(self):
        """Live-plot the per-episode reward curve and persist it to ./img/.

        NOTE(review): assumes the ./img/ directory already exists.
        """
        plt.figure(1)
        plt.clf()
        episode_reward = torch.tensor(self.episode_reward_list, dtype=torch.float)
        plt.title('Training...')
        plt.xlabel('Episode')
        plt.ylabel('Reward')
        plt.plot(episode_reward.numpy(), label='Reward')
        plt.legend(loc='upper left')
        plt.pause(0.001)  # pause a bit so that plots are updated
        plt.savefig('./img/' + "cnn_episode_reward.png")
        np.save('./img/' + "cnn_Reward.npy", episode_reward.numpy())

    def plot_loss(self):
        """Live-plot the per-episode mean loss (skipping the first 3 episodes)
        and persist it to ./img/.

        NOTE(review): assumes the ./img/ directory already exists.
        """
        episode_loss = torch.tensor(self.episode_loss_list, dtype=torch.float)
        x_len = len(episode_loss)
        if x_len < 3:
            return
        plt.figure(2)
        plt.clf()

        plt.title('Training...')
        plt.xlabel('Episode')
        plt.ylabel('Loss')
        # Skip the first 3 noisy episodes so the y-axis scale stays useful
        plt.plot(list(range(x_len))[3:], episode_loss.numpy()[3:], label='Loss')

        plt.legend(loc='upper left')
        plt.pause(0.001)  # pause a bit so that plots are updated
        plt.savefig('./img/' + "cnn_episode_loss.png")
        np.save('./img/' + "cnn_Loss.npy", episode_loss.numpy())


class DuelCNN(nn.Module):
    """
    Dueling CNN Q-network (Wang et al., https://arxiv.org/abs/1511.06581).

    The convolutional trunk feeds two heads: a scalar state-value stream
    V(s) and a per-action advantage stream A(s, a), combined as
        Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
    Expected input: (batch, 4, h, w) stacked grayscale frames.
    """

    def __init__(self, h, w, output_size):
        """
        Args:
            h: input image height.
            w: input image width.
            output_size: number of actions (one Q-value per action).
        """
        super(DuelCNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=4,  out_channels=32, kernel_size=8, stride=4)
        self.bn1 = nn.BatchNorm2d(32)
        convw, convh = self.conv2d_size_calc(w, h, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
        self.bn2 = nn.BatchNorm2d(64)
        convw, convh = self.conv2d_size_calc(convw, convh, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
        self.bn3 = nn.BatchNorm2d(64)
        convw, convh = self.conv2d_size_calc(convw, convh, kernel_size=3, stride=1)

        linear_input_size = convw * convh * 64  # last conv layer's flattened size

        # Advantage (action) stream
        self.Alinear1 = nn.Linear(in_features=linear_input_size, out_features=128)
        self.Alrelu = nn.LeakyReLU()  # hidden-layer activation
        self.Alinear2 = nn.Linear(in_features=128, out_features=output_size)

        # State-value stream
        self.Vlinear1 = nn.Linear(in_features=linear_input_size, out_features=128)
        self.Vlrelu = nn.LeakyReLU()  # hidden-layer activation
        self.Vlinear2 = nn.Linear(in_features=128, out_features=1)  # single V(s) node

    def conv2d_size_calc(self, w, h, kernel_size=5, stride=2):
        """
        Return the (width, height) of a conv layer's output for the given
        input size, kernel and stride (no padding, no dilation).
        """
        next_w = (w - (kernel_size - 1) - 1) // stride + 1
        next_h = (h - (kernel_size - 1) - 1) // stride + 1
        return next_w, next_h

    def forward(self, x):
        """Compute Q-values of shape (batch, output_size) for input frames x."""
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))

        x = x.view(x.size(0), -1)  # flatten every batch element

        Ax = self.Alrelu(self.Alinear1(x))
        Ax = self.Alinear2(Ax)  # no activation on last layer

        Vx = self.Vlrelu(self.Vlinear1(x))
        Vx = self.Vlinear2(Vx)  # no activation on last layer

        # Dueling combine: subtract the PER-SAMPLE advantage mean (over the
        # action dimension). The original used Ax.mean(), a single scalar
        # averaged over the whole batch AND all actions, which coupled
        # unrelated samples within a minibatch.
        q = Vx + (Ax - Ax.mean(dim=1, keepdim=True))

        return q

if __name__ == '__main__':
    # Script entry point: train a DQN agent on Atari Pong.
    environment = gym.make('Pong-v4')
    agent = Agent(environment)
    agent.train(n_episodes=1000, n_steps=500, save_every=100)
