import logging
import numpy as np
import matplotlib.pyplot as plt
import gym
import sys

import torch
from torch import nn
from torch import optim

from map import Map
from sim_world_wrapper import SimWorldWrapper

class Reinforce():
    """REINFORCE policy-gradient trainer for a multi-robot task-dispatch world.

    The state is the scheduler's full time window, flattened to a vector; an
    action is "issue a transport task from src to dst", where src and dst
    each range over the map's leaf nodes plus None.
    """

    def __init__(self):
        self.initLogger()
        self._robot_num = 15
        self._task_num = 100
        self._map = Map('map.txt')
        # The whole time window is used as the "state". For every robot and
        # every edge it stores both the time the robot enters the edge and
        # the time it leaves it, hence the factor of 2.
        self._n_inputs = 2 * self._robot_num * self._map.getEdgesNum()
        # An action is "issue a task from src to dst"; src/dst must each be
        # a leaf node or None:
        #   src None, dst set : send a robot to dst, wherever it currently is.
        #   src set, dst None : send a robot to src, wherever it currently is.
        #   both None         : issue no task at all.
        #   both set          : send a robot first to src, then on to dst.
        self._leaf_nodes = self._map.getLeafNodes()
        self._edges = self._map.getAllEdges()
        # One extra "None" slot per endpoint, hence (|leaves| + 1)^2 actions.
        self._n_outputs = (len(self._leaf_nodes) + 1) ** 2
        self.createNeuralNetwork()

    def initLogger(self):
        """Create the 'MyTCS' logger writing timestamped records to log.txt."""
        #level = logging.DEBUG
        level = logging.WARNING
        # create logger
        self._logger = logging.getLogger('MyTCS')
        self._logger.setLevel(level)

        # create file handler and set level
        fh = logging.FileHandler(filename='log.txt')
        fh.setLevel(level)

        # create formatter
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        # add formatter to fh
        fh.setFormatter(formatter)

        # add fh to logger
        self._logger.addHandler(fh)

    def timeWindow2StateVector(self, tw):
        """Flatten a time window into a 1-D numpy state vector.

        For each edge (canonicalized so (a, b) and (b, a) name the same
        resource) the entry times ``tw.t_i[e]`` and exit times ``tw.t_o[e]``
        are concatenated, in the fixed order of ``self._edges``.

        NOTE(review): if self._edges contains both orientations of an edge,
        the canonical edge's data is appended twice — confirm self._edges
        holds exactly one orientation per edge.
        """
        ret = None
        for e in self._edges:
            # Since the edges (a, b) and (b, a) are the same resource,
            # we need to remove ambiguities.
            e = (min(e[0], e[1]), max(e[0], e[1]))
            if ret is None:
                ret = np.concatenate((tw.t_i[e], tw.t_o[e]))
            else:
                ret = np.concatenate((ret, tw.t_i[e]))
                ret = np.concatenate((ret, tw.t_o[e]))
        return ret

    def action2SrcDst(self, action):
        """Decode a flat action index into a (src, dst) node pair.

        The action space enumerates all (src, dst) combinations over the
        leaf nodes plus one virtual "None" slot at index
        ``len(self._leaf_nodes)``. None in either position means that
        endpoint is unconstrained; (None, None) means "issue no task".
        """
        src_idx, dst_idx = divmod(action, len(self._leaf_nodes) + 1)
        if src_idx < len(self._leaf_nodes):
            src = self._leaf_nodes[src_idx]
        else:
            src = None
        if dst_idx < len(self._leaf_nodes):
            dst = self._leaf_nodes[dst_idx]
        else:
            dst = None
        return src, dst

    def createNeuralNetwork(self):
        """Build the policy network: state vector -> softmax over all actions."""
        self._network = nn.Sequential(
            nn.Linear(self._n_inputs, 16),
            nn.ReLU(),
            nn.Linear(16, self._n_outputs),
            nn.Softmax(dim=-1))

    def predictActionProb(self, tw):
        """Return the policy's action-probability tensor for time window tw."""
        action_probs = self._network(torch.FloatTensor(self.timeWindow2StateVector(tw)))
        return action_probs

    def discountRewards(self, rewards, gamma=0.99):
        """Return baseline-subtracted discounted returns for one episode.

        NOTE(review): each entry equals ``gamma**t * G_t`` — the discount is
        applied from the episode start rather than from step t (the tutorial
        this code follows does the same). Confirm this is intended.
        """
        r = np.array([gamma**i * rewards[i]
                      for i in range(len(rewards))])
        # Reverse the array direction for cumsum and then
        # revert back to the original order
        r = r[::-1].cumsum()[::-1]
        # Subtract the mean as a constant baseline to reduce gradient variance.
        return r - r.mean()

    def reinforce(self, num_episodes=2000, batch_size=10, gamma=0.99):
        """Train the policy with batched REINFORCE.

        Runs num_episodes simulated episodes; every time the batch counter
        reaches batch_size, performs one policy-gradient step on the
        accumulated (state, action, discounted-reward) tuples.
        Returns the list of total (undiscounted) rewards per episode.
        """
        # Set up lists to hold results
        total_rewards = []
        batch_rewards = []
        batch_actions = []
        batch_states = []
        # NOTE(review): starting at 1 and incrementing before the comparison
        # means an update fires every (batch_size - 1) episodes — confirm.
        batch_counter = 1

        # Define optimizer
        optimizer = optim.Adam(self._network.parameters(),
                               lr=0.01)

        action_space = np.arange(self._n_outputs)
        for ep in range(num_episodes):
            # A fresh simulated world per episode.
            world = SimWorldWrapper(robot_num=self._robot_num, task_num=self._task_num)
            tw = world.getTCS().getScheduler().getTimeWindow()
            s_0 = self.timeWindow2StateVector(tw)
            states = []
            rewards = []
            actions = []
            complete = False
            ii = 0
            while not complete:
                # Sample an action from the current policy (detach: sampling
                # must not participate in the gradient).
                print('step {0}'.format(ii))
                ii += 1
                action_probs = self.predictActionProb(tw).detach().numpy()
                action = np.random.choice(action_space, p=action_probs)
                src, dst = self.action2SrcDst(action)
                tw, r, complete = world.step(src, dst)
                s_1 = self.timeWindow2StateVector(tw)
                states.append(s_0)
                rewards.append(r)
                actions.append(action)
                s_0 = s_1

                # If complete, batch data
                if complete:
                    print("epoch {0} completed".format(ep))
                    batch_rewards.extend(self.discountRewards(rewards, gamma))
                    batch_states.extend(states)
                    batch_actions.extend(actions)
                    batch_counter += 1
                    total_rewards.append(sum(rewards))

                    # If batch is complete, update network
                    if batch_counter == batch_size:
                        optimizer.zero_grad()
                        # Stack into one ndarray first: FloatTensor on a list
                        # of ndarrays is extremely slow.
                        state_tensor = torch.FloatTensor(np.array(batch_states))
                        reward_tensor = torch.FloatTensor(batch_rewards)
                        # Actions are used as indices, must be LongTensor
                        action_tensor = torch.LongTensor(batch_actions)

                        # Calculate loss.
                        # BUG FIX: the original called the undefined name
                        # `policy_estimator.predict(...)` (leftover from the
                        # tutorial this follows), which raised NameError at
                        # the first update; recompute the probabilities with
                        # this instance's own network instead.
                        logprob = torch.log(self._network(state_tensor))
                        selected_logprobs = reward_tensor * \
                            logprob[np.arange(len(action_tensor)), action_tensor]
                        loss = -selected_logprobs.mean()

                        # Calculate gradients
                        loss.backward()
                        # Apply gradients
                        optimizer.step()

                        batch_rewards = []
                        batch_actions = []
                        batch_states = []
                        batch_counter = 1

                    # Print running average
                    print("\rEp: {} Average of last 10: {:.2f}".format(
                        ep + 1, np.mean(total_rewards[-10:])), end="")

        return total_rewards
    
if __name__ == '__main__':
    # Train a short run, then plot the raw per-episode rewards alongside a
    # trailing moving average.
    agent = Reinforce()
    rewards = agent.reinforce(num_episodes=5)

    window = 10
    smoothed_rewards = []
    for i in range(len(rewards)):
        # Average the trailing window (the whole prefix while i <= window).
        start = i - window if i > window else 0
        smoothed_rewards.append(np.mean(rewards[start:i + 1]))

    plt.figure(figsize=(12,8))
    plt.plot(rewards)
    plt.plot(smoothed_rewards)
    plt.ylabel('Total Rewards')
    plt.xlabel('Episodes')
    plt.show()
