import os
import random
import sys
from collections import deque

import cv2
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
from torch.optim.lr_scheduler import LambdaLR
from torch.nn import Module, Conv2d, ReLU, MaxPool2d, Flatten, Linear
from torchsummary import summary

sys.path.append("game/")
import game.wrapped_flappy_bird as game

# TensorBoard writer — module-level side effect: creates/opens data/log on import.
logger = SummaryWriter(log_dir="data/log")
log_step_interval = 100  # step interval between TensorBoard scalar records

GAME = 'bird'  # the name of the game being played for log files
ACTIONS = 2  # number of valid actions
GAMMA = 0.99  # decay rate of past observations
OBSERVE = 5000  # timesteps to observe before training
EXPLORE = 20000  # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001  # final value of epsilon
INITIAL_EPSILON = 0.01  # starting value of epsilon
REPLAY_MEMORY = 20000  # number of previous transitions to remember
BATCH_SIZE = 32  # size of minibatch
FRAME_PER_ACTION = 1  # choose a (possibly random) action once every this many frames
UPDATE_TIME = 100  # steps between target-network syncs / checkpoint saves
width = 80  # preprocessed frame width
height = 80  # preprocessed frame height
PLAY_ACTION = 0  # presumably the "do nothing" action index; unused in this file -- TODO confirm


class DeepNetWork(Module):
    """Q-value CNN.

    Maps a stack of 4 preprocessed 80x80 frames (shape [N, 4, 80, 80])
    to one Q-value per action (ACTIONS outputs).
    """

    def __init__(self):
        super(DeepNetWork, self).__init__()
        # Four conv+pool stages (4->32->64->64->32 channels) shrink the
        # 80x80 input to a 5x5 map (32 * 5 * 5 = 800 features), followed
        # by a small fully-connected head.
        stages = []
        for stage_idx, (c_in, c_out) in enumerate([(4, 32), (32, 64), (64, 64), (64, 32)]):
            stages.append(Conv2d(in_channels=c_in, out_channels=c_out,
                                 kernel_size=(3, 3), stride=(1, 1), padding=1))
            stages.append(ReLU(inplace=True))
            # The first pool uses the default floor rounding; later pools
            # use ceil_mode=True (matches the original layer layout; with
            # even spatial sizes the two round the same way).
            if stage_idx == 0:
                stages.append(MaxPool2d(kernel_size=2))
            else:
                stages.append(MaxPool2d(kernel_size=2, ceil_mode=True))
        stages.append(Flatten())
        stages.append(Linear(800, 128))
        stages.append(ReLU())
        stages.append(Linear(128, ACTIONS))
        self.model = torch.nn.Sequential(*stages)

    def forward(self, x):
        """Return per-action Q-values for the input frame stack."""
        return self.model(x)


def preprocess(img):
    """Convert a raw game frame into a binary 1x80x80 (channel-first) array.

    Pipeline: crop to the left 400 columns, resize to width x height,
    grayscale, hard-threshold to {0, 255}, then mirror and rotate 90 deg
    counter-clockwise to match the orientation fed to the network.
    Also pops up a debug window showing the processed frame.
    """
    cropped = img[:, :400, :]
    small = cv2.resize(cropped, (width, height))
    gray = cv2.cvtColor(small, cv2.COLOR_RGB2GRAY)
    _, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    mirrored = cv2.flip(binary, 1)
    rotated = cv2.rotate(mirrored, cv2.ROTATE_90_COUNTERCLOCKWISE)
    cv2.imshow("threshold", rotated)  # debug view of the exact network input
    return np.reshape(rotated, (1, width, height))


def clear():
    """Clear the terminal screen: 'cls' on Windows, 'clear' elsewhere.

    Bug fix: the original compared ``sys.platform == 'windows'``, which is
    never true -- on Windows ``sys.platform`` is ``'win32'`` -- so the
    'cls' branch was unreachable.  ``os.name == 'nt'`` is the robust check.
    """
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')


class BrainDQNMain(object):
    """DQN agent for Flappy Bird.

    Holds an online network (Q_net) and a target network (Q_netT) that is
    synced every UPDATE_TIME steps, an epsilon-greedy policy, and a bounded
    replay memory sampled for minibatch training.
    """

    def __init__(self):
        # Replay buffer of (state, action, reward, nextState, terminal) tuples.
        self.replayMemory = deque()
        self.epsilon = INITIAL_EPSILON
        self.actions = 2  # NOTE(review): duplicates the module-level ACTIONS constant
        self.timeStep = 0
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.Q_net = DeepNetWork()   # online network (trained every step)
        self.Q_net.to(self.device)
        self.Q_netT = DeepNetWork()  # target network (provides Bellman targets)
        self.Q_netT.to(self.device)
        self.loss_func = torch.nn.MSELoss()
        LR = 1e-3
        self.optimizer = torch.optim.Adam(self.Q_net.parameters(), lr=LR)
        # Exponential lr decay (x0.99 per scheduler step), floored at 1e-6.
        lambda1 = lambda epoch: max(0.99 ** epoch, 1e-6 / LR)
        self.scheduler = LambdaLR(self.optimizer, lr_lambda=lambda1)
        self.load()
        # When True, setPerception only advances the frame stack -- no
        # replay storage and no training.
        self.AUTOPLAY = False
        self.currentState = []
        self.outText = ""

    def save(self):
        """Persist the online network's weights to Q_net.pth."""
        print("\nsave model param")
        torch.save(self.Q_net.state_dict(), 'Q_net.pth')

    def load(self):
        """Load saved weights into both online and target networks, if present."""
        pth = "Q_net.pth"
        if os.path.exists(pth):
            print("load model param")
            self.Q_net.load_state_dict(torch.load(pth))
            self.Q_netT.load_state_dict(torch.load(pth))

    def train(self):
        """Run one DQN update on a random minibatch from replay memory."""
        # Step 1: obtain random minibatch from replay memory
        minibatch = random.sample(self.replayMemory, BATCH_SIZE)
        # get the batch variables
        state_batch = [d[0] for d in minibatch]
        action_batch = [d[1] for d in minibatch]
        reward_batch = [d[2] for d in minibatch]
        nextState_batch = [d[3] for d in minibatch]
        terminal_batch = [d[4] for d in minibatch]
        # Step 2: calculate y
        y_batch = np.zeros([BATCH_SIZE, 1])
        nextState_batch = torch.Tensor(nextState_batch)
        nextState_batch = nextState_batch.to(self.device)
        action_batch = np.array(action_batch)
        # Actions are stored one-hot; argmax recovers each action's index.
        index = action_batch.argmax(axis=1)
        # print("action " + str(index))
        index = np.reshape(index, [BATCH_SIZE, 1])
        action_batch_tensor = torch.LongTensor(index)
        action_batch_tensor = action_batch_tensor.to(self.device)
        # Bellman targets come from the (periodically synced) target network.
        QValue_batch = self.Q_netT(nextState_batch)
        QValue_batch = QValue_batch.detach().cpu().numpy()

        for i in range(BATCH_SIZE):
            # QValue_batch[i] holds one Q-value per action.  Target:
            # if the episode ended, y = reward[i]; otherwise
            # y = reward[i] + GAMMA * max(QValue_batch[i]) -- the current
            # reward plus the discounted best achievable future value.
            if terminal_batch[i]:
                y_batch[i][0] = reward_batch[i]
            else:
                y_batch[i][0] = reward_batch[i] + GAMMA * np.max(QValue_batch[i])

        y_batch = np.array(y_batch).reshape([BATCH_SIZE, 1])
        y_batch_tensor = Variable(torch.Tensor(y_batch))
        y_batch_tensor = y_batch_tensor.to(self.device)
        state_batch_tensor = Variable(torch.Tensor(state_batch))
        state_batch_tensor = state_batch_tensor.to(self.device)

        # Predicted Q for the action actually taken (gather on action index).
        y_predict = self.Q_net(state_batch_tensor).gather(1, action_batch_tensor)
        loss = self.loss_func(y_predict, y_batch_tensor)
        # print("loss is " + str(loss))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        if self.timeStep % log_step_interval == 0:
            logger.add_scalar("train loss", loss.item(), global_step=self.timeStep - OBSERVE)

        # Sync the target network and checkpoint every UPDATE_TIME steps.
        if self.timeStep % UPDATE_TIME == 0:
            self.Q_netT.load_state_dict(self.Q_net.state_dict())
            self.save()

        # Extra numbered snapshot every 10000 steps.
        if self.timeStep % 10000 == 0:
            torch.save(self.Q_net.state_dict(), 'Q_net{}.pth'.format(self.timeStep - OBSERVE))

        if self.timeStep % 100 == 0:
            self.scheduler.step()

    def getAction(self):
        """Choose a one-hot action vector epsilon-greedily from Q_net."""
        currentState = torch.Tensor([self.currentState])
        currentState = currentState.to(self.device)
        QValue = self.Q_net(currentState)[0]
        action = np.zeros(self.actions)
        if self.timeStep % FRAME_PER_ACTION == 0:
            if random.random() <= self.epsilon:
                # Exploration: uniformly random action.
                action_index = random.randrange(self.actions)
                self.outText += "choose random action " + str(action_index)
                # print("choose random action " + str(action_index), end="|")
                action[action_index] = 1
            else:
                # Exploitation: greedy action from the online network.
                action_index = np.argmax(QValue.detach().cpu().numpy())
                self.outText += "choose qnet   action " + str(action_index)
                # print("choose qnet value action " + str(action_index), end="|")
                action[action_index] = 1
        else:
            action[0] = 1  # do nothing

        # Anneal epsilon linearly once past the observation phase.
        # NOTE(review): dividing by EXPLORE * 0.1 anneals 10x faster than
        # EXPLORE's "frames over which to anneal" comment suggests -- confirm
        # the 0.1 factor is intentional.
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / (EXPLORE * 0.1)
        return action

    def setInitState(self, img0):
        """Initialize the 4-frame state stack by repeating the first frame."""
        self.currentState = np.stack((img0, img0, img0, img0), axis=0)
        print(self.currentState.shape)

    def setPerception(self, nextObservation, action, reward, terminal):  # print(nextObservation.shape)
        """Advance the frame stack; store the transition and train unless AUTOPLAY."""
        # Drop the oldest frame and append the new observation on axis 0.
        newState = np.append(self.currentState[1:, :, :], nextObservation,
                             axis=0)  # newState = np.append(nextObservation,self.currentState[:,:,1:],axis = 2)
        if self.AUTOPLAY:
            # Inference-only mode: no replay storage, no training.
            self.currentState = newState
            return 0

        self.replayMemory.append((self.currentState, action, reward, newState, terminal))
        if len(self.replayMemory) > REPLAY_MEMORY:
            self.replayMemory.popleft()
        if self.timeStep > OBSERVE:  # Train the network
            self.train()

        # print info
        state = ""
        if self.timeStep <= OBSERVE:
            state = state + "observe"
        elif OBSERVE < self.timeStep <= OBSERVE + EXPLORE:
            state = state + "explore"
        else:
            state = state + "train"

        LR = self.optimizer.param_groups[0]['lr']
        self.outText += " | TIMESTEP " + str(self.timeStep) + " | STATE " + str(state) + " | EPSILON " + str(
            self.epsilon) + " | LR " + str(LR)

        # Overwrite the same terminal line each step (carriage return).
        print("\r" + self.outText, end='')
        self.outText = ""
        # print("TIMESTEP", self.timeStep, "| STATE", state, "| EPSILON", self.epsilon)
        self.currentState = newState
        self.timeStep += 1

    def autoplay(self, nextObservation):
        """Advance the frame stack without storing or training (inference only)."""
        newState = np.append(self.currentState[1:, :, :], nextObservation,
                             axis=0)
        self.currentState = newState


def main():
    """Run the agent against Flappy Bird, feeding preprocessed frames to the DQN.

    NOTE: AUTOPLAY is set to True, so setPerception only advances the frame
    stack -- this loop plays but does not store transitions or train.
    """
    # Step 1: build the agent (loads saved weights if present).
    agent = BrainDQNMain()
    # Step 2: start the Flappy Bird game.
    bird = game.GameState()
    # Step 3: bootstrap with one "do nothing" action to obtain a first frame.
    noop = np.array([1, 0])
    first_frame, _reward, terminal = bird.frame_step(noop)
    # Same pipeline as preprocess(), but kept 2-D (width x height) so
    # setInitState can stack four copies along a new channel axis.
    # NOTE(review): uses COLOR_BGR2GRAY while preprocess() uses RGB2GRAY --
    # confirm which channel order frame_step actually returns.
    first_frame = first_frame[:, :400, :]
    first_frame = cv2.cvtColor(cv2.resize(first_frame, (width, height)), cv2.COLOR_BGR2GRAY)
    _ret, first_frame = cv2.threshold(first_frame, 1, 255, cv2.THRESH_BINARY)
    first_frame = cv2.flip(first_frame, 1)
    first_frame = cv2.rotate(first_frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
    agent.setInitState(first_frame)
    agent.AUTOPLAY = True

    while True:
        chosen = agent.getAction()
        observation, reward, terminal = bird.frame_step(chosen)
        agent.setPerception(preprocess(observation), chosen, reward, terminal)


if __name__ == "__main__":
    # main()
    # Standalone mode: build the network and print its architecture summary.
    brain = DeepNetWork()
    # Bug fix: the original unconditional brain.cuda() crashed on CPU-only
    # machines; pick the device dynamically instead.
    use_cuda = torch.cuda.is_available()
    brain.to(torch.device("cuda:0" if use_cuda else "cpu"))
    # torchsummary defaults to device="cuda"; pass the actual device so the
    # summary also works without a GPU.
    summary(brain, input_size=(4, width, height), device="cuda" if use_cuda else "cpu")
