import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.distributions import Categorical
import time
import cv2
import sys
import copy

from torchvision.transforms import ToPILImage

sys.path.append("game/")
import wrapped_flappy_bird as game

import numpy as np


def convert_image(observation):
    """Preprocess a raw RGB game frame into an 80x80 binary image.

    The frame is resized to 80x80, converted to grayscale, then binarized
    so every non-black pixel becomes 255. Returns an (80, 80, 1) array.
    """
    gray = cv2.cvtColor(cv2.resize(observation, (80, 80)), cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    return binary.reshape(80, 80, 1)


"""Pytorch中神经网络模块化接口nn的了解"""
"""
torch.nn是专门为神经网络设计的模块化接口。nn构建于autograd之上，可以用来定义和运行神经网络。
nn.Module是nn中十分重要的类,包含网络各层的定义及forward方法。
定义自已的网络：
    需要继承nn.Module类，并实现forward方法。
    一般把网络中具有可学习参数的层放在构造函数__init__()中，
    不具有可学习参数的层(如ReLU)可放在构造函数中，也可不放在构造函数中(而在forward中使用nn.functional来代替)

    只要在nn.Module的子类中定义了forward函数，backward函数就会被自动实现(利用Autograd)。
    在forward函数中可以使用任何Variable支持的函数，毕竟在整个pytorch构建的图中，是Variable在流动。还可以使用
    if,for,print,log等python语法.

    注：Pytorch基于nn.Module构建的模型中，只支持mini-batch的Variable输入方式，
    比如，只有一张输入图片，也需要变成 N x C x H x W 的形式：

    input_image = torch.FloatTensor(1, 28, 28)
    input_image = Variable(input_image)
    input_image = input_image.unsqueeze(0)   # 1 x 1 x 28 x 28

"""


class PolicyGradientNetwork(nn.Module):
    """CNN policy network mapping an 80x80 grayscale frame to action probabilities.

    Input:  (N, 1, 80, 80) float tensor.
    Output: (N, 2) tensor of softmax probabilities over the two actions
            (do-nothing / flap).
    """

    def __init__(self):
        super().__init__()
        # Three conv blocks (3x3 kernels, stride 1, padding 1), each followed
        # by batch-norm, ReLU and a 2x2 max-pool that halves the resolution:
        # (1, 80, 80) -> (8, 40, 40) -> (16, 20, 20) -> (16, 10, 10)
        self.cnn = nn.Sequential(
            nn.Conv2d(1, 8, 3, 1, 1),
            nn.BatchNorm2d(8),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),

            nn.Conv2d(8, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),

            nn.Conv2d(16, 16, 3, 1, 1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),
        )

        # Fully connected head with tanh activations, ending in 2 action logits.
        self.fc = nn.Sequential(
            nn.Linear(16 * 10 * 10, 160),
            nn.Tanh(),
            nn.Linear(160, 80),
            nn.Tanh(),
            nn.Linear(80, 2),
        )

    def forward(self, state):
        """Return softmax action probabilities for a batch of states."""
        out = self.cnn(state)
        out = out.view(out.size(0), -1)  # flatten everything but the batch dim
        out = self.fc(out)
        # NOTE: removed a debug print(out.size()) that ran on every forward pass.
        return F.softmax(out, dim=-1)  # convert logits to probabilities

# Convert an H x W (x C) uint8 frame into a float tensor scaled to [0, 1].
train_transform = transforms.Compose([
    ToPILImage(),
    transforms.ToTensor(),
])

class PolicyGradientAgent():
    """Vanilla policy-gradient (REINFORCE) agent wrapping a policy network."""

    def __init__(self, network):
        self.network = network
        # Plain SGD decides how the policy weights are updated from gradients.
        self.optimizer = optim.SGD(self.network.parameters(), lr=0.001)

    def learn(self, log_probs, rewards):
        """One policy-gradient step.

        Minimizes -sum(log_prob * reward), i.e. pushes up the log-probability
        of actions in proportion to the reward assigned to each step.
        """
        loss = -(log_probs * rewards).sum()

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def sample(self, state):
        """Sample an action for `state`.

        Returns the sampled action index and the log-probability of that
        action under the current policy (kept as a tensor so it can carry
        gradients into `learn`).
        """
        tensor = train_transform(state)
        # Add the batch dimension before running the network.
        probs = self.network(tensor.unsqueeze(0))
        # Categorical treats each row of `probs` as a probability vector and
        # samples an integer index in [0, K) accordingly.
        dist = Categorical(probs)
        chosen = dist.sample()
        return chosen.item(), dist.log_prob(chosen)


def main():
    """Train a policy-gradient agent to play Flappy Bird.

    Runs forever (NUM_BATCH batches of EPISODE_PER_BATCH episodes each),
    updating the policy once per batch and checkpointing every 100 batches.
    """
    # Step 1: build the policy network and agent.
    network = PolicyGradientNetwork()
    agent = PolicyGradientAgent(network)

    # Step 2: init the Flappy Bird game and grab an initial frame by
    # performing the "do nothing" action.
    flappyBird = game.GameState()
    action0 = np.array([1, 0])  # [1, 0] = do nothing, [0, 1] = flap
    observation0, _, terminal = flappyBird.frame_step(action0)
    observation0 = cv2.cvtColor(cv2.resize(observation0, (80, 80)), cv2.COLOR_BGR2GRAY)
    _, observation0 = cv2.threshold(observation0, 1, 255, cv2.THRESH_BINARY)
    state = observation0

    # Resume from a checkpoint when one exists; otherwise train from scratch.
    # (The original loaded unconditionally and crashed on a fresh run.)
    try:
        agent.network.load_state_dict(torch.load('rl_round.pkl'))
    except FileNotFoundError:
        print("No checkpoint 'rl_round.pkl' found; training from scratch.")

    agent.network.train()  # ensure the network is in training mode
    EPISODE_PER_BATCH = 5  # update the agent once every 5 episodes
    NUM_BATCH = 40000      # total number of policy updates
    avg_total_rewards, avg_final_rewards = [], []

    cha = 1.5   # accumulated (return - bias) gap, used to adjust the baseline
    bias = cha  # baseline subtracted from episode returns

    np.set_printoptions(precision=1)

    for batch in range(NUM_BATCH):
        log_probs, rewards = [], []
        total_rewards, final_rewards = [], []
        # One per-step reward list per episode, filled in as episodes finish.
        # BUGFIX: this must be created once per BATCH. The original re-created
        # it inside the episode loop, wiping earlier episodes' rewards so the
        # concatenated rewards were shorter than log_probs at update time.
        total_my_rewards = [[] for _ in range(EPISODE_PER_BATCH)]

        epoch_start_time = time.time()

        for episode in range(EPISODE_PER_BATCH):
            total_reward, total_step = 0, 0
            my_rewards = []
            while True:
                # Sample an action index and keep its log-probability tensor.
                action, log_prob = agent.sample(state)
                action = [1, 0] if action == 0 else [0, 1]

                nextObservation, reward, terminal = flappyBird.frame_step(action)
                nextObservation = convert_image(nextObservation)

                log_probs.append(log_prob)
                state = nextObservation
                total_reward += reward
                my_rewards.append(reward)
                total_step += 1

                if terminal:
                    final_rewards.append(reward)
                    total_rewards.append(total_reward)
                    # Uniform credit assignment: every action in the episode
                    # shares the full episode return.
                    rewards.append(np.full(total_step, total_reward))
                    cha = cha + total_reward - bias

                    total_reward -= bias
                    # Reward-to-go style credit: step i gets the (baseline-
                    # adjusted) return minus rewards already collected before i.
                    tmplist = [total_reward]
                    for i in range(1, len(my_rewards)):
                        tmplist.append(tmplist[i - 1] - my_rewards[i - 1])
                    total_my_rewards[episode] = copy.deepcopy(tmplist)
                    break

        # Log training progress for this batch.
        avg_total_reward = sum(total_rewards) / len(total_rewards)
        avg_final_reward = sum(final_rewards) / len(final_rewards)
        avg_total_rewards.append(avg_total_reward)
        avg_final_rewards.append(avg_final_reward)
        print("Batch {},\t{:.2f} sec(s)\tTotal Reward = {:.1f},\tFinal Reward = {:.1f} \tbias {}".format(
            batch + 1, time.time() - epoch_start_time, avg_total_reward, avg_final_reward, bias))

        # Update the network with normalized per-step rewards.
        total_my_rewards = np.concatenate(total_my_rewards, axis=0)
        total_my_rewards = (total_my_rewards - np.mean(total_my_rewards)) / (np.std(total_my_rewards) + 1e-9)
        agent.learn(torch.stack(log_probs), torch.from_numpy(total_my_rewards))

        if batch % 100 == 0:
            torch.save(agent.network.state_dict(), 'rl_round.pkl')
            print("cha:")
            print(cha)
            # Slowly move the baseline toward the recent average return.
            bias += (cha / 100.0 / 5.0)
            cha = 0.0


# Script entry point: start training when run directly (not on import).
if __name__ == '__main__':
    main()
