from collections import deque

import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.distributions import Categorical
import torch.utils.data as Data
import matplotlib.pyplot as plt
import time
import cv2
import sys
import copy
import random

from torchvision.transforms import ToPILImage

sys.path.append("game/")
import wrapped_flappy_bird as game
import numpy as np

# Hyper Parameters:
FRAME_PER_ACTION = 1  # act on every frame
GAMMA = 0.99  # decay rate of past observations
OBSERVE = 5000.  # timesteps to observe before training
EXPLORE = 0.  # frames over which to anneal epsilon
# NOTE(review): with INITIAL_EPSILON == FINAL_EPSILON == 0 the annealing
# branch in getAction never fires, so EXPLORE == 0 cannot divide by zero here.
FINAL_EPSILON = 0.  # 0.001 # final value of epsilon
INITIAL_EPSILON = 0.  # 0.01 # starting value of epsilon
REPLAY_MEMORY = 50000  # number of previous transitions to remember
BATCH_SIZE = 64  # size of minibatch
UPDATE_TIME = 100  # sync target net from eval net every this many timesteps
LR=1e-7  # Adam learning rate

N_ACTIONS = 2  # action space size: do-nothing / flap
ENV_A_SHAPE = (2, 1)  # unused in this file (referenced only in commented-out code)

IsLoadPkl = True  # whether to load a pre-trained pkl checkpoint
PklPath = 'saved_networks/265000network_ql.pkl'  # checkpoint to load when IsLoadPkl


class MyDataset(Data.Dataset):
    """Replay memory exposed as a torch Dataset.

    Stores parallel deques of (state, action, reward, next_state, terminal)
    and evicts the oldest transition once REPLAY_MEMORY is exceeded.

    The `x`/`y` constructor arguments are legacy and kept only for backward
    compatibility: `x` is ignored and `self.y` is never read in this file.
    """

    def __init__(self, x=None, y=None):
        self.s = deque()   # preprocessed current states (tensors)
        self.a = deque()   # actions as 1-element long tensors (for gather)
        self.r = deque()   # rewards
        self.s_ = deque()  # preprocessed next states (tensors)
        self.t = deque()   # terminal flags
        if y is not None:
            self.y = torch.LongTensor(y)  # legacy; unused by this file

    def __len__(self):
        return len(self.s)

    def __getitem__(self, index):
        return self.s[index], self.a[index], self.r[index], self.s_[index], self.t[index]

    def __add__(self, s, a, r, s_, t):
        """Append one transition, evicting the oldest beyond REPLAY_MEMORY.

        NOTE: not real `+` semantics -- callers invoke it explicitly as
        dataset.__add__(s, a, r, s_, t).
        """
        self.s.append(train_transform(s))
        self.a.append(torch.tensor([a], dtype=torch.long))
        self.r.append(r)
        self.s_.append(train_transform(s_))
        self.t.append(t)
        if len(self.s) > REPLAY_MEMORY:
            self.s.popleft()
            self.a.popleft()
            self.r.popleft()
            self.s_.popleft()
            self.t.popleft()


class BrainDQN:
    """DQN agent for Flappy Bird.

    Holds a replay memory (MyDataset), an eval network that is trained, a
    target network that is periodically synced from it, and an
    epsilon-greedy action policy.
    """

    def __init__(self):
        # init replay memory
        self.dataset = MyDataset()
        # init some parameters
        self.timeStep = 0
        self.epsilon = INITIAL_EPSILON
        # init Q network (the torch network that is actually trained)
        self.eval_net = PolicyGradientNetwork()
        # init Target Q Network (periodically synced copy used for targets)
        self.target_net = PolicyGradientNetwork()

        # NOTE: nn.Module.cuda() moves the module in place and returns self,
        # so *_net_cuda aliases the same module objects as *_net.
        self.eval_net_cuda = self.eval_net.cuda()
        self.target_net_cuda = self.target_net.cuda()
        if IsLoadPkl:
            self.eval_net.load_state_dict(torch.load(PklPath))
            self.target_net.load_state_dict(torch.load(PklPath))

        # The optimizer defines the parameter-update strategy (Adam here,
        # mainly to speed up training and reduce overfitting).
        self.optimizer = optim.Adam(self.eval_net.parameters(), lr=LR)
        self.lossfunc = nn.MSELoss()
        self.avg_loss = []  # per-sample loss history, used for the loss plot

    def sample(self, state):
        """Sample an action by treating the network output as a policy.

        Returns (action_index, log_prob).

        NOTE(review): unused in this file. The input is unsqueezed twice,
        producing a 5-D tensor -- looks wrong for Conv2d, confirm before
        using. Also the network returns raw scores (its softmax is commented
        out), so passing them to Categorical as `probs` is questionable.
        """
        input_image = train_transform(state).unsqueeze(0)
        action_prob = self.eval_net(input_image.unsqueeze(0))  # forward pass -> per-action scores
        # Categorical builds a distribution over {0 .. K-1} from `probs`:
        # a 1-D probs of length K gives relative sampling weight per index;
        # a 2-D probs is treated as a batch of probability vectors.
        # sample() returns the integer index of the drawn category.
        action_dist = Categorical(action_prob)
        action = action_dist.sample()
        log_prob = action_dist.log_prob(action)
        return action.item(), log_prob


    def trainQNetwork(self):
        """Run one Q-learning minibatch on the replay memory.

        Also periodically prints tensors, saves the loss plot, checkpoints
        the eval network, and syncs the target network.
        """
        loader = Data.DataLoader(
            dataset=self.dataset,  # replay memory wrapped as a Dataset
            batch_size=BATCH_SIZE,  # minibatch size
            shuffle=True,  # sample transitions in random order
        )

        for step, (s, a, r, s_, t) in enumerate(loader):
            # gather() picks the Q-value of the action actually taken
            q_eval = self.eval_net_cuda(s.cuda()).gather(1, a.cuda())
            q_next = self.target_net_cuda(s_.cuda()).detach()

            # torch.max(x, 1) returns (values, indices); [0] keeps the max
            # next-state Q-value for the bootstrapped target.
            # NOTE(review): the terminal flag `t` is ignored (see the
            # commented line below), so terminal transitions still bootstrap.
            #q_target = r.view(BATCH_SIZE, 1) + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1) #if not t else -1# shape (batch, 1)
            q_target = r.cuda().view(BATCH_SIZE, 1) + GAMMA * q_next.max(1)[0].view(BATCH_SIZE, 1)
            # occasional debug printout of the tensors
            # print('Step: ', step, '| q_eval ', q_eval, '| q_target ', q_target)
            if self.timeStep % 500 == 0:
                print('q_eval ', q_eval, 'q_target ', q_target)
            loss = self.lossfunc(q_eval.double(), q_target.double())
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            if self.timeStep % 2 == 0:
                self.avg_loss.append(loss.item() / BATCH_SIZE)
            if self.timeStep % 10000 == 0:
                plt.plot(self.avg_loss)
                plt.title("Loss")
                plt.savefig('saved_networks/'+ str(self.timeStep) + '.png')
            break  # train on a single minibatch per call

        # save the network every 5000 timesteps
        if self.timeStep % 5000 == 0:
            torch.save(self.eval_net.state_dict(), 'saved_networks/' + str(self.timeStep) + 'network_ql.pkl')

        # sync target network from eval network every UPDATE_TIME steps
        if self.timeStep % UPDATE_TIME == 0:
            self.target_net.load_state_dict(self.eval_net.state_dict())


    def setPerception(self, nextObservation, action, reward, terminal):
        """Store one transition; once past the observe phase, also train."""
        newState =  nextObservation
        self.dataset.__add__(self.currentState, action, reward, newState, terminal)

        if self.timeStep > OBSERVE:  # enough frames observed -> start training
            self.trainQNetwork()

        # print info
        state = ""
        if self.timeStep <= OBSERVE:
            state = "observe"
        elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"

        if self.timeStep % 50 == 0:
            print("TIMESTEP", self.timeStep, "/ STATE", state, \
              "/ EPSILON", self.epsilon)

        self.currentState = newState
        self.timeStep += 1

    def getAction(self):
        """Return an action index chosen epsilon-greedily from the eval net."""
        if self.timeStep % FRAME_PER_ACTION == 0:  # FRAME_PER_ACTION is 1, so always taken
            if random.random() <= self.epsilon:  # epsilon-greedy: explore randomly
                action = random.randrange(N_ACTIONS)
            else: # exploit: take the best action per the current network
                actions_value = self.eval_net(train_transform(self.currentState).unsqueeze(0).cuda()).detach()
                # action = torch.max(actions_value, 1)[1].data.numpy()
                # action = action.reshape(ENV_A_SHAPE)
                action = torch.argmax(actions_value)  # index of the max Q-value
                #print("specify", action)
        else:
            action = 1  # do nothing

        # anneal epsilon over time (inactive here: epsilon starts at 0)
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE

        return action

    def setInitState(self, observation):
        """Record the first (preprocessed) frame as the current state."""
        self.currentState = observation


def convert_image(observation):
    """Preprocess a raw game frame into a binarized 80x80x1 grayscale image."""
    small = cv2.resize(observation, (80, 80))
    gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    return binary.reshape(80, 80, 1)

def convert_action(action, n_actions=None):
    """One-hot encode an action index for the game's frame_step API.

    Args:
        action: integer index of the chosen action.
        n_actions: size of the action space; defaults to the module-level
            N_ACTIONS when omitted (backward compatible with old callers).

    Returns:
        A numpy float array of length n_actions with 1.0 at `action`.
    """
    if n_actions is None:
        n_actions = N_ACTIONS
    a = np.zeros(n_actions)
    a[action] = 1
    return a

class PolicyGradientNetwork(nn.Module):
    """CNN mapping a 1x80x80 grayscale frame to one raw score per action.

    Despite the name it is used as a Q-network here: forward() returns raw
    linear outputs (no softmax). Attribute names `cnn`/`fc` are kept so that
    previously saved state_dicts still load.
    """

    def __init__(self):
        super().__init__()
        # Three conv stages, each halving spatial resolution via max-pooling:
        # [1, 80, 80] -> [8, 40, 40] -> [16, 20, 20] -> [16, 10, 10]
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 8, 3, 1, 1),
            nn.BatchNorm2d(8),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),

            nn.Conv2d(8, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),

            nn.Conv2d(16, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),
        )

        # Two-layer fully-connected head producing 2 action scores.
        self.fc = nn.Sequential(
            nn.Linear(16 * 10 * 10, 512),
            nn.ReLU(),
            nn.Linear(512, 2),
        )

    def forward(self, state):
        """Return raw per-action scores for a batch of [N, 1, 80, 80] frames."""
        features = self.cnn(state)
        flat = features.flatten(start_dim=1)
        return self.fc(flat)


# Preprocessing pipeline used on stored/queried frames:
# ndarray -> PIL image -> float tensor normalized to [0, 1].
train_transform = transforms.Compose([
    ToPILImage(),
    transforms.ToTensor(),
])

def playFlappyBird():
    """Run the Flappy Bird DQN loop: init agent and game, then play forever."""
    # Step 1: init the DQN agent
    brain = BrainDQN()
    # Step 2: init the Flappy Bird game
    flappyBird = game.GameState()
    # Step 3.1: obtain the initial state with a "do nothing" action
    action0 = np.array([1, 0])  # one-hot: index 0 = do nothing
    observation0, reward0, terminal = flappyBird.frame_step(action0)
    # Reuse the shared preprocessing helper instead of duplicating the
    # resize/grayscale/threshold steps inline (this also makes the initial
    # state the same 80x80x1 shape as every later state).
    brain.setInitState(convert_image(observation0))

    # Step 3.2: run the game forever
    while True:
        action = brain.getAction()
        # the game expects a one-hot action array; the agent uses an index
        nextObservation, reward, terminal = flappyBird.frame_step(convert_action(action))
        nextObservation = convert_image(nextObservation)
        brain.setPerception(nextObservation, action, reward, terminal)


def main():
    """Entry point: start the Flappy Bird self-play / training loop."""
    playFlappyBird()


if __name__ == '__main__':  # run only when executed as a script
    main()
