from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.distributions import Categorical
import torch.utils.data as Data
import matplotlib.pyplot as plt
import time
import cv2
import sys
import random
sys.path.append("game/")
import wrapped_flappy_bird as game
import numpy as np


# Hyper Parameters:
FRAME_PER_ACTION = 1  # act on every frame
GAMMA = 0.99  # decay rate of past observations (discount factor)
OBSERVE = 32768.  # timesteps to observe before training
EXPLORE = 1000.  # frames over which to anneal epsilon
FINAL_EPSILON = 0.0  # 0.001 # final value of epsilon
INITIAL_EPSILON = 0.0  # 0.01 # starting value of epsilon
REPLAY_MEMORY = 32768  # replay capacity; leaf count of the sum tree (2^15 — comment in source said "nodes of a full binary tree: 2^k - 1")
BATCH_SIZE = 64  # size of minibatch
UPDATE_TIME = 100  # sync target network with eval network every N training steps
LR = 1e-6  # Adam learning rate
N_ACTIONS = 2  # number of available actions

IsLoadPkl = True  # whether to load a pre-trained checkpoint (.pkl state_dict)
PklPath = 'saved_networks/600000network_ql.pkl'

print(torch.cuda.is_available())

from torch.utils.tensorboard import SummaryWriter
# NOTE(review): hard-coded absolute Windows path — consider making this configurable.
writer = SummaryWriter('D:/PythonProject/tensorboarddir')

# preprocess raw image to 80*80 gray image
def convert_image(observation):
    """Preprocess a raw game frame into an 80x80 binary image of shape (80, 80, 1).

    The frame is resized, converted to grayscale, then thresholded so every
    non-black pixel becomes 255.
    """
    resized = cv2.resize(observation, (80, 80))
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    return binary.reshape(80, 80, 1)


train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),  # convert the image to a Tensor and normalize pixel values to [0, 1]
])


class ReplayData(object):
    """One replay transition (state, action, reward, next_state, terminal).

    Both images are converted to [0, 1] float tensors via ``train_transform``
    at construction time; the terminal flag is stored unchanged.
    """

    def __init__(self, s, a, r, s_, t):
        self.s, self.s_ = train_transform(s), train_transform(s_)
        self.a = torch.tensor([a], dtype=torch.long)
        self.r = torch.tensor([r], dtype=torch.float64)
        self.t = t

    def __getitem__(self, index):
        # NOTE: the index is ignored — any subscript yields the whole transition tuple.
        return (self.s, self.a, self.r, self.s_, self.t)

class QNetwork(nn.Module):
    """CNN Q-value estimator: 1x80x80 grayscale frame -> 2 action values.

    The parameter layout (module indices inside ``cnn`` and ``fc``) is kept
    identical to the original network so previously saved state_dicts load
    without remapping.
    """

    def __init__(self):
        super().__init__()
        blocks = []
        # Three conv blocks; each MaxPool halves the spatial size: 80 -> 40 -> 20 -> 10.
        for in_ch, out_ch in ((1, 8), (8, 16), (16, 16)):
            blocks += [
                nn.Conv2d(in_ch, out_ch, 3, 1, 1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(),
                nn.MaxPool2d(2, 2, 0),
            ]
        self.cnn = nn.Sequential(*blocks)

        # Fully-connected head mapping flattened features to the 2 Q-values.
        self.fc = nn.Sequential(
            nn.Linear(16 * 10 * 10, 512),
            nn.ReLU(),
            nn.Linear(512, 2),
        )

    def forward(self, state):
        features = self.cnn(state)
        return self.fc(features.flatten(start_dim=1))


class SumTree(object):
    """Binary sum tree storing transition priorities in its leaves.

    Modified from https://github.com/jaara/AI-blog/blob/master/SumTree.py.
    ``tree`` is an array of 2*capacity-1 nodes: the first capacity-1 entries
    are internal sums, the last ``capacity`` entries are leaf priorities.
    ``data`` holds the transition object for each leaf, in the same order.
    """
    data_pointer = 0  # next leaf slot to write (wraps around when full)

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = np.zeros(2 * capacity - 1)
        self.data = np.zeros(capacity, dtype=object)

    def add(self, p, data):
        """Insert ``data`` with priority ``p`` at the next slot, overwriting the oldest."""
        leaf = self.data_pointer + self.capacity - 1
        self.data[self.data_pointer] = data
        self.update(leaf, p)
        # Wrap the write pointer once capacity is reached.
        self.data_pointer = (self.data_pointer + 1) % self.capacity

    def update(self, tree_idx, p):
        """Set the priority of leaf ``tree_idx`` and propagate the delta to the root."""
        delta = p - self.tree[tree_idx]
        self.tree[tree_idx] = p
        while tree_idx != 0:
            tree_idx = (tree_idx - 1) // 2  # parent index (integer division)
            self.tree[tree_idx] += delta

    def print_tree(self):
        print("tree", self.tree)

    def get_leaf(self, v):
        """Descend from the root to find the leaf whose cumulative priority covers ``v``.

        Returns (leaf index in ``tree``, its priority, the stored data object).
        """
        idx = 0
        size = len(self.tree)
        while True:
            left = 2 * idx + 1
            if left >= size:
                # Reached a leaf.
                break
            # Go left if v falls inside the left subtree's mass, otherwise
            # subtract that mass and go right.
            if v <= self.tree[left]:
                idx = left
            else:
                v -= self.tree[left]
                idx = left + 1
        return idx, self.tree[idx], self.data[idx - self.capacity + 1]

    @property
    def total_p(self):
        """Sum of all leaf priorities (the root node)."""
        return self.tree[0]


class Memory(object):  # stored as ( s, a, r, s_ ) in SumTree
    """Prioritized experience replay buffer backed by a SumTree.

    Modified from
    https://github.com/jaara/AI-blog/blob/master/Seaquest-DDQN-PER.py.
    """
    epsilon = 0.01  # small amount to avoid zero priority
    alpha = 0.6  # [0~1] convert the importance of TD error to priority
    beta = 1  # importance-sampling exponent, annealed toward 1
    beta_increment_per_sampling = 0.001
    abs_err_upper = 1.  # clipped abs error

    def __init__(self, capacity):
        self.tree = SumTree(capacity)

    def store(self, transition):
        """Add a transition with the current maximum leaf priority (never zero)."""
        max_p = np.max(self.tree.tree[-self.tree.capacity:])
        self.tree.add(max_p if max_p != 0 else self.abs_err_upper, transition)

    def sample(self, n):
        """Draw ``n`` transitions, priority-proportionally (duplicates possible).

        Returns (tree indices, transitions, importance-sampling weights).
        """
        idxs = np.empty((n,), dtype=np.int32)
        weights = np.empty((n, 1))
        samples = []
        # Split the total priority mass into n equal segments and sample one
        # point uniformly inside each.
        segment = self.tree.total_p / n
        self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
        min_prob = np.min(self.tree.tree[-self.tree.capacity:]) / self.tree.total_p
        if min_prob == 0:
            min_prob = 0.00001  # guard against division by zero while the buffer fills
        for i in range(n):
            v = np.random.uniform(segment * i, segment * (i + 1))
            idx, p, data = self.tree.get_leaf(v)
            prob = p / self.tree.total_p
            # IS weight, normalized by the minimum sampling probability.
            weights[i, 0] = np.power(prob / min_prob, -self.beta)
            idxs[i] = idx
            samples.append(data)
        return idxs, samples, weights

    def batch_update(self, tree_idx, abs_errors):
        """Refresh leaf priorities from new |TD error| values (clipped, exponentiated)."""
        abs_errors += self.epsilon  # avoid zero priority; mutates caller's array (as original)
        clipped = np.minimum(abs_errors, self.abs_err_upper)
        for ti, p in zip(tree_idx, np.power(clipped, self.alpha)):
            self.tree.update(ti, p)


class BrainDQN:
    """DQN agent with a target network and prioritized experience replay.

    Owns the eval/target Q-networks, the replay memory, and the training
    step. External interface (method names/signatures) is unchanged.
    """

    def __init__(self):
        self.action_dim = N_ACTIONS

        # Prioritized replay memory (SumTree-backed).
        self.memory = Memory(capacity=REPLAY_MEMORY)
        self.timeStep = 0
        self.epsilon = INITIAL_EPSILON
        # Online network (trained) and target network (periodically synced).
        self.eval_net = QNetwork()
        self.target_net = QNetwork()

        # nn.Module.cuda() moves the module in place, so these are aliases
        # of the same objects on the GPU.
        self.eval_net_cuda = self.eval_net.cuda()
        self.target_net_cuda = self.target_net.cuda()

        if IsLoadPkl:
            self.eval_net.load_state_dict(torch.load(PklPath))
            self.target_net.load_state_dict(torch.load(PklPath))

        self.optimizer = optim.Adam(self.eval_net.parameters(), lr=LR)
        # Per-sample loss (reduction='none') so importance-sampling weights
        # can be applied in backward().
        self.lossfunc = nn.MSELoss(reduction='none')
        self.avg_loss = []  # loss history (currently unused)
        self.score = []

    def getAction(self, state):
        """Epsilon-greedy action selection; returns a plain int action index."""
        if self.timeStep % FRAME_PER_ACTION == 0:  # FRAME_PER_ACTION is 1, so always taken
            if random.random() <= self.epsilon:
                # Explore: random action.
                action = random.randrange(N_ACTIONS)
            else:
                # Exploit: greedy action from the eval network.
                actions_value = self.eval_net_cuda(train_transform(state).unsqueeze(0).cuda()).detach()
                # BUG FIX: return an int like the explore branch, not a 0-d tensor.
                action = int(torch.argmax(actions_value))
        else:
            action = 1  # do nothing

        # Anneal epsilon over time (no-op with the current epsilon settings).
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        return action

    def store_transition(self, s, a, r, s_, done):
        """Wrap the transition and store it with maximal priority."""
        transition = ReplayData(s, a, r, s_, done)
        self.memory.store(transition)  # new transitions get the highest priority

    def setPerception(self, cur_observ, nextObservation, action, reward, terminal):
        """Store the transition, train once past the observation phase, log progress."""
        self.store_transition(cur_observ, action, reward, nextObservation, terminal)

        if self.timeStep > OBSERVE:  # enough transitions observed — start training
            self.trainQNetwork()

        # Phase label for the periodic progress printout.
        if self.timeStep <= OBSERVE:
            state = "observe"
        elif self.timeStep <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        if self.timeStep % 50 == 0:
            print("TIMESTEP", self.timeStep, "/ STATE", state, \
                  "/ EPSILON", self.epsilon)

        self.timeStep += 1

    def trainQNetwork(self):
        """One prioritized-replay DQN training step."""
        # Step 1: sample a priority-weighted minibatch.
        tree_idx, minibatch, ISWeights = self.memory.sample(BATCH_SIZE)
        state_batch = torch.stack([d.s for d in minibatch], dim=0)
        action_batch = torch.stack([d.a for d in minibatch], dim=0)
        reward_batch = torch.stack([d.r for d in minibatch], dim=0)
        next_state_batch = torch.stack([d.s_ for d in minibatch], dim=0)
        # 1.0 where the transition ended the episode, else 0.0.
        terminal_batch = torch.tensor([float(d.t) for d in minibatch]).view(BATCH_SIZE, 1)

        # Step 2: TD target. BUG FIX: terminal transitions must not bootstrap
        # from the next state — their target is the reward alone.
        q_eval = self.eval_net_cuda(state_batch.cuda()).gather(1, action_batch.cuda())  # (batch, 1)
        q_next = self.target_net_cuda(next_state_batch.cuda()).detach()
        q_target = reward_batch.cuda().view(BATCH_SIZE, 1) + \
            GAMMA * (1.0 - terminal_batch.cuda()) * q_next.max(1)[0].view(BATCH_SIZE, 1)

        loss = self.lossfunc(q_eval.double(), q_target.double())
        writer.add_scalar('avg_loss-myprdqn32323', torch.sum(loss) / BATCH_SIZE, self.timeStep)

        self.optimizer.zero_grad()
        # backward(grad) on the per-sample loss computes d/dθ Σ w_i * loss_i,
        # applying the IS weights. BUG FIX: dropped retain_graph=True — the
        # graph is rebuilt every step, so retaining it only wasted memory.
        loss.backward(torch.from_numpy(ISWeights).cuda())
        self.optimizer.step()

        # Recompute TD errors post-update to refresh the sampled priorities.
        with torch.no_grad():
            q_eval = self.eval_net_cuda(state_batch.cuda()).gather(1, action_batch.cuda())
        abs_error = torch.abs(q_target - q_eval)
        self.memory.batch_update(tree_idx, abs_error.cpu().numpy())

        # Save network every 10000 iterations.
        if self.timeStep % 10000 == 0:
            torch.save(self.eval_net.state_dict(), 'saved_networks/' + str(self.timeStep) + 'network_ql.pkl')

        # Periodically sync the target network with the eval network.
        if self.timeStep % UPDATE_TIME == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())


def convert_action(action):
    """One-hot encode an action index into a float array of length N_ACTIONS."""
    one_hot = np.zeros(N_ACTIONS)
    one_hot[action] = 1
    return one_hot


def playFlappyBird():
    """Create the agent and the game, then run the play/train loop forever."""
    # Step 1: init the DQN agent.
    agent = BrainDQN()
    # Step 2: init the Flappy Bird game.
    bird = game.GameState()

    # Step 3.1: bootstrap with action 0 to obtain the first frame.
    observation, _, _ = bird.frame_step(convert_action(0))
    observation = convert_image(observation)

    # Step 3.2: run the game.
    while True:
        action = agent.getAction(observation)
        # frame_step expects a one-hot action array, hence convert_action.
        raw_next, reward, terminal = bird.frame_step(convert_action(action))
        next_observation = convert_image(raw_next)
        agent.setPerception(observation, next_observation, action, reward, terminal)
        observation = next_observation

# Script entry point: start the endless training/playing loop.
if __name__ == '__main__':
    playFlappyBird()
