from collections import deque
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.distributions import Categorical
import torch.utils.data as Data
import time
import cv2
import sys
import random
sys.path.append("game/")
import wrapped_flappy_bird as game
import numpy as np

from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter('D:/PythonProject/tensorboarddir')

# Hyper Parameters:
FRAME_PER_ACTION = 1
GAMMA = 0.99  # decay rate of past observations
OBSERVE = 3000.  # timesteps to observe before training
EXPLORE = 150000.  # frames over which to anneal epsilon
FINAL_EPSILON = 0.0  # 0.001 # final value of epsilon
INITIAL_EPSILON = 0.0  # 0.01 # starting value of epsilon
# NOTE(review): with INITIAL_EPSILON == FINAL_EPSILON == 0 the anneal is a no-op.
REPLAY_MEMORY = 32768  # replay buffer capacity; NOTE(review): original comment said 2**k - 1 (sum-tree size), but 32768 == 2**15
BATCH_SIZE = 64  # size of minibatch
UPDATE_TIME = 100  # sync target network from eval network every N frames
LR = 1e-3  # Adam learning rate
N_ACTIONS = 2  # number of selectable actions

IsLoadPkl = True  # whether to load a pretrained .pkl checkpoint
PklPath = 'saved_networks/570000network_ql.pkl'


# preprocess raw image to 80*80 gray image
def convert_image(observation):
    """Reduce a raw game frame to a binary 80x80 single-channel image.

    The frame is resized to 80x80, converted to grayscale, then thresholded
    so every non-black pixel becomes 255. Returns an array of shape (80, 80, 1).
    """
    gray = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
    # Map any pixel brighter than 1 to 255; the threshold return flag is unused.
    _, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    return np.reshape(binary, (80, 80, 1))


# Preprocessing pipeline applied to frames before they enter the network.
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),  # convert the image to a Tensor and normalize values to [0, 1]
])


class ReplayDataset(Data.Dataset):
    """Bounded experience-replay buffer exposed as a torch ``Dataset``.

    Stores transitions (state, action, reward, next_state, terminal) in five
    parallel deques capped at REPLAY_MEMORY entries (oldest dropped first).
    States are run through ``train_transform`` on insertion.
    """

    def __init__(self, x=None, y=None):
        # ``x`` is accepted for backward compatibility but is never used.
        self.s = deque()   # preprocessed current states
        self.a = deque()   # actions, stored as shape-(1,) long tensors (for gather)
        self.r = deque()   # rewards
        self.s_ = deque()  # preprocessed next states
        self.t = deque()   # terminal flags
        if y is not None:
            # Modernized from the legacy torch.LongTensor(y) constructor.
            self.y = torch.tensor(y, dtype=torch.long)

    def __len__(self):
        return len(self.s)

    def __getitem__(self, index):
        return self.s[index], self.a[index], self.r[index], self.s_[index], self.t[index]

    def __add__(self, s, a, r, s_, t):
        """Append one transition, evicting the oldest once capacity is exceeded.

        NOTE(review): this is not the usual ``+`` operator contract; callers
        invoke it explicitly as ``dataset.__add__(...)``.
        """
        self.s.append(train_transform(s))
        self.a.append(torch.tensor([a], dtype=torch.long))
        self.r.append(r)
        self.s_.append(train_transform(s_))
        self.t.append(t)
        if len(self.s) > REPLAY_MEMORY:
            self.s.popleft()
            self.a.popleft()
            self.r.popleft()
            self.s_.popleft()
            self.t.popleft()


class QNetwork(nn.Module):
    """Convolutional Q-value estimator: 1x80x80 grayscale frame -> 2 action values.

    Attribute names ``cnn`` and ``fc`` are part of the checkpoint format
    (state_dict keys) and must not be renamed.
    """

    def __init__(self):
        super().__init__()
        # Three conv stages, each halving the spatial resolution via max-pooling:
        # [1, 80, 80] -> [8, 40, 40] -> [16, 20, 20] -> [16, 10, 10]
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 8, 3, 1, 1),
            nn.BatchNorm2d(8),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),

            nn.Conv2d(8, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),

            nn.Conv2d(16, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),
        )

        # Two-layer head mapping the flattened 16*10*10 feature map to one
        # Q-value per action. No softmax: raw values are what DQN needs.
        self.fc = nn.Sequential(
            nn.Linear(16 * 10 * 10, 512),
            nn.ReLU(),
            nn.Linear(512, 2),
        )

    def forward(self, state):
        """Return raw (un-normalized) Q-values of shape (batch, 2)."""
        features = self.cnn(state)
        flat = features.view(features.size(0), -1)
        return self.fc(flat)


class BrainDQN:
    """DQN agent: epsilon-greedy acting, replay-based training, target network."""

    def __init__(self):
        # Experience replay buffer.
        self.dataset = ReplayDataset()
        self.timeStep = 0
        self.epsilon = INITIAL_EPSILON
        # Online network (trained every frame) and target network
        # (synced from the online network every UPDATE_TIME frames).
        self.eval_net = QNetwork()
        self.target_net = QNetwork()

        self.eval_net_cuda = self.eval_net.cuda()
        self.target_net_cuda = self.target_net.cuda()

        if IsLoadPkl:
            self.eval_net.load_state_dict(torch.load(PklPath))
            self.target_net.load_state_dict(torch.load(PklPath))

        # Adam optimizer over the online network only; MSE between Q(s,a) and target.
        self.optimizer = optim.Adam(self.eval_net.parameters(), lr=LR)
        self.lossfunc = nn.MSELoss()
        self.avg_loss = []  # loss history (currently unused)
        self.score = []

    def getAction(self, state):
        """Choose an action for ``state`` with epsilon-greedy exploration.

        Returns a plain Python int index into the action space.
        """
        if self.timeStep % FRAME_PER_ACTION == 0:  # FRAME_PER_ACTION is 1, so always taken
            if random.random() <= self.epsilon:
                # Explore: uniform random action.
                action = random.randrange(N_ACTIONS)
            else:
                # Exploit: greedy action from the online network.
                actions_value = self.eval_net_cuda(train_transform(state).unsqueeze(0).cuda()).detach()
                # Fix: .item() so both branches consistently return an int
                # (previously this branch returned a 0-dim tensor).
                action = torch.argmax(actions_value).item()
        else:
            action = 1  # do nothing

        # Anneal epsilon towards FINAL_EPSILON once the observation phase ends.
        # NOTE(review): with INITIAL_EPSILON == FINAL_EPSILON == 0 this is a no-op.
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
        return action

    def setPerception(self, cur_observ, nextObservation, action, reward, terminal):
        """Record one transition, train once past the observation phase, log progress."""
        self.dataset.__add__(cur_observ, action, reward, nextObservation, terminal)

        if self.timeStep > OBSERVE:  # enough observations collected: start training
            self.trainQNetwork()

        # Human-readable phase label for logging.
        if self.timeStep <= OBSERVE:
            state = "observe"
        elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        if self.timeStep % 50 == 0:
            print("TIMESTEP", self.timeStep, "/ STATE", state, "/ EPSILON", self.epsilon)

        self.timeStep += 1

    def trainQNetwork(self):
        """Run one gradient step on a single shuffled minibatch from replay."""
        loader = Data.DataLoader(
            dataset=self.dataset,
            batch_size=BATCH_SIZE,
            shuffle=True,
        )

        for step, (s, a, r, s_, t) in enumerate(loader):
            # Q(s, a) for the actions actually taken (gather selects per row).
            q_eval = self.eval_net_cuda(s.cuda()).gather(1, a.cuda())  # (batch, 1)
            # Bootstrap value from the frozen target network (no grad).
            q_next = self.target_net_cuda(s_.cuda()).detach()

            # Bug fix: mask out the bootstrapped future value for terminal
            # transitions — the stored terminal flag ``t`` was previously ignored,
            # so episode-ending states wrongly received GAMMA * max Q(s').
            not_terminal = (1.0 - t.float().cuda()).view(-1, 1)
            # view(-1, 1) instead of view(BATCH_SIZE, 1): the loader's last
            # batch can be smaller than BATCH_SIZE.
            q_target = r.cuda().view(-1, 1) + GAMMA * not_terminal * q_next.max(1)[0].view(-1, 1)

            loss = self.lossfunc(q_eval.double(), q_target.double())
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            writer.add_scalar('mydqn-2-cmppr-1', loss.item(), self.timeStep)
            break  # train on only one minibatch per frame

        # Checkpoint the online network every 10000 frames.
        if self.timeStep % 10000 == 0:
            torch.save(self.eval_net.state_dict(), 'saved_networks/' + str(self.timeStep) + 'network_ql.pkl')

        # Periodically sync the target network with the online network.
        if self.timeStep % UPDATE_TIME == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())


def convert_action(action):
    """Return a one-hot float vector of length N_ACTIONS with ``action`` set to 1."""
    return np.eye(N_ACTIONS)[action]


def playFlappyBird():
    """Drive the Flappy Bird environment with the DQN agent."""
    # Step 1: init the agent.
    brain = BrainDQN()
    # Step 2: init the Flappy Bird game.
    flappyBird = game.GameState()
    # Step 3.1: obtain the initial state by doing nothing once.
    a0 = 0
    s0, r0, t0 = flappyBird.frame_step(convert_action(a0))
    s0 = convert_image(s0)

    # Step 3.2: run the game loop.
    # NOTE(review): the loop stops after OBSERVE steps, which is exactly the
    # observation threshold in setPerception, so training barely starts —
    # confirm whether this limit is intentional.
    step = 0
    while step < OBSERVE:
        step += 1
        a1 = brain.getAction(s0)
        # frame_step expects a one-hot action array, hence convert_action.
        s1, r1, t1 = flappyBird.frame_step(convert_action(a1))
        s1 = convert_image(s1)
        # Bug fix: store the transition (s0, a1, r1, s1, t1) — the action taken
        # in s0 together with the reward/terminal it produced — instead of the
        # stale previous-step values (a0, r0, t0).
        brain.setPerception(s0, s1, a1, r1, t1)
        s0 = s1

# Entry point: start the play/train loop when run as a script.
if __name__ == '__main__':
    playFlappyBird()
