#!/usr/bin/env python
# -*- coding:utf-8 -*-
# pylint: disable=no-member
"""
作者：qiangge
日期：2021年10月09日
"""
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.optim as optim
import rvo2
import copy
from collections import namedtuple
from numpy.linalg import norm
import numpy as np
import torch.nn as nn
import git
import torch
import shutil
import logging
import itertools
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from src.turtlebot3_dqn.Train_Environment import Env as train_Env
from src.turtlebot3_dqn.Test_Environment import Env as test_Env
from src.turtlebot3_dqn.state import ObservableState, FullState, JointState
from torch.utils.data import Dataset

# Holonomic action: velocity components along the x and y axes.
ActionXY = namedtuple('ActionXY', ['vx', 'vy'])
# Unicycle action: forward speed v and heading change r.
ActionRot = namedtuple('ActionRot', ['v', 'r'])


class ORCA():
    """Non-trainable baseline policy that delegates to the RVO2 (ORCA) simulator.

    A PyRVOSimulator is (re)built lazily in predict(); agent 0 is the robot,
    agents 1..n mirror the observed humans.
    """

    def __init__(self):
        self.name = 'ORCA'
        self.trainable = False
        self.multiagent_training = True
        self.kinematics = 'holonomic'
        self.safety_space = 0          # extra clearance added to every radius
        self.neighbor_dist = 20
        self.max_neighbors = 10
        self.time_horizon = 5
        self.time_horizon_obst = 5
        self.radius = 0.5
        self.max_speed = 1
        self.sim = None                # lazily-created rvo2.PyRVOSimulator
        self.time_step = 0.25
        self.phase = 'train'
        self.last_state = None
        # Initialized here so reading them before the setters run cannot
        # raise AttributeError (the original only created them in setters).
        self.device = None
        self.env = None

    def set_phase(self, phase):
        self.phase = phase

    def set_device(self, device):
        self.device = device

    def set_env(self, env):
        self.env = env

    def predict(self, state):
        """Return an ActionXY for the robot given a JointState.

        Rebuilds the simulator when the number of humans changes; otherwise
        only syncs positions/velocities into the existing simulator.
        """
        self_state = state.self_state
        # Number of simulated agents must match 1 robot + n humans.
        if self.sim is not None and self.sim.getNumAgents() != len(state.human_states) + 1:
            del self.sim
            self.sim = None
        if self.sim is None:
            self.sim = rvo2.PyRVOSimulator(self.time_step, self.neighbor_dist, self.max_neighbors,
                                           self.time_horizon, self.time_horizon_obst, self.radius, self.max_speed)
            self.sim.addAgent(self_state.position, self.neighbor_dist, self.max_neighbors,
                              self.time_horizon, self.time_horizon_obst,
                              self_state.radius + 0.01 + self.safety_space,
                              self_state.v_pref, self_state.velocity)
            for human_state in state.human_states:
                self.sim.addAgent(human_state.position, self.neighbor_dist, self.max_neighbors,
                                  self.time_horizon, self.time_horizon_obst,
                                  human_state.radius + 0.01 + self.safety_space,
                                  self.max_speed, human_state.velocity)
        else:
            self.sim.setAgentPosition(0, self_state.position)
            self.sim.setAgentVelocity(0, self_state.velocity)
            for i, human_state in enumerate(state.human_states):
                self.sim.setAgentPosition(i + 1, human_state.position)
                self.sim.setAgentVelocity(i + 1, human_state.velocity)
        # Preferred velocity points at the goal, capped at unit speed.
        velocity = np.array((self_state.gx - self_state.px, self_state.gy - self_state.py))
        speed = np.linalg.norm(velocity)
        pref_vel = velocity / speed if speed > 1 else velocity

        # Perturb a little to avoid deadlocks due to perfect symmetry.
        # perturb_angle = np.random.random() * 2 * np.pi
        # perturb_dist = np.random.random() * 0.01
        # perturb_vel = np.array((np.cos(perturb_angle), np.sin(perturb_angle))) * perturb_dist
        # pref_vel += perturb_vel

        self.sim.setAgentPrefVelocity(0, tuple(pref_vel))
        for i, human_state in enumerate(state.human_states):
            # Humans are treated as obstacles with no goal of their own.
            self.sim.setAgentPrefVelocity(i + 1, (0, 0))
        self.sim.doStep()
        action = ActionXY(*self.sim.getAgentVelocity(0))
        self.last_state = state
        # Fixed: use the module's logging instead of a bare print.
        logging.debug('action: %s', action)
        return action


def mlp(input_dim, mlp_dims, last_relu=False):
    """Build a fully-connected network: Linear layers of the given sizes with
    ReLU between them (and after the last layer only when last_relu is True).

    :param input_dim: size of the input features
    :param mlp_dims: list of hidden/output layer sizes
    :param last_relu: append a ReLU after the final Linear layer
    :return: nn.Sequential module
    """
    dims = [input_dim, *mlp_dims]
    last = len(dims) - 2
    modules = []
    for idx, (fan_in, fan_out) in enumerate(zip(dims[:-1], dims[1:])):
        modules.append(nn.Linear(fan_in, fan_out))
        if last_relu or idx != last:
            modules.append(nn.ReLU())
    return nn.Sequential(*modules)


class ValueNetwork(nn.Module):
    """Value network: an LSTM summarizes the sequence of human states and its
    final hidden state, concatenated with the robot's own state, is scored by
    an MLP.
    """

    def __init__(self, input_dim, self_state_dim, mlp_dims, lstm_hidden_dim):
        """
        :param input_dim: per-human joint-state feature size fed to the LSTM
        :param self_state_dim: number of leading features that describe the robot itself
        :param mlp_dims: layer sizes of the scoring MLP (last entry is the output size)
        :param lstm_hidden_dim: LSTM hidden-state size
        """
        super(ValueNetwork, self).__init__()
        self.self_state_dim = self_state_dim
        self.lstm_hidden_dim = lstm_hidden_dim
        self.mlp = mlp(self_state_dim + lstm_hidden_dim, mlp_dims)
        self.lstm = nn.LSTM(input_dim, lstm_hidden_dim, batch_first=True)

    def forward(self, state):
        """
        :param state: tensor of shape (batch, num_humans, input_dim); the robot's
            own features occupy the first self_state_dim columns of every row
        :return: value tensor of shape (batch, mlp_dims[-1])
        """
        size = state.shape
        # The self-state is replicated in every row; row 0 suffices.
        self_state = state[:, 0, :self.self_state_dim]
        # human_state = state[:, :, self.self_state_dim:]
        # Fixed: allocate the initial hidden/cell states on the input's device
        # and dtype — the original hard-coded CPU float tensors, which fails
        # as soon as the module is moved to GPU.
        h0 = torch.zeros(1, size[0], self.lstm_hidden_dim,
                         device=state.device, dtype=state.dtype)
        c0 = torch.zeros(1, size[0], self.lstm_hidden_dim,
                         device=state.device, dtype=state.dtype)
        output, (hn, cn) = self.lstm(state, (h0, c0))
        hn = hn.squeeze(0)
        joint_state = torch.cat([self_state, hn], dim=1)
        value = self.mlp(joint_state)
        return value


class LstmRL():
    """LSTM-RL policy: scores each candidate action by a one-step lookahead
    through a learned value network.

    Humans are fed to the LSTM sorted farthest-first so the closest human is
    processed last and dominates the final hidden state.
    """

    def __init__(self):
        self.name = 'LSTM-RL'
        self.env = None
        self.gamma = 0.9                       # discount factor
        self.self_state_dim = 6
        self.human_state_dim = 7
        self.mlp_dims = [150, 100, 100, 1]
        self.global_state_dim = 50             # LSTM hidden size
        self.joint_state_dim = self.self_state_dim + self.human_state_dim
        self.model = ValueNetwork(self.input_dim(), self.self_state_dim,
                                  self.mlp_dims, self.global_state_dim)
        self.query_env = True                  # ask env for lookahead instead of propagating humans
        self.speed_samples = 1
        self.rotation_samples = 8
        self.with_om = False                   # occupancy maps disabled by default
        self.phase = 'train'
        self.epsilon = 0
        self.device = None
        self.last_state = None
        self.action_space = None               # built lazily from v_pref
        self.kinematics = 'holonomic'
        self.time_step = 0.25

    def input_dim(self):
        """Per-human feature size consumed by the value network's LSTM."""
        return self.joint_state_dim

    def get_model(self):
        return self.model

    def set_env(self, env):
        self.env = env

    def set_epsilon(self, epsilon):
        self.epsilon = epsilon

    def set_device(self, device):
        self.device = device

    def set_phase(self, phase):
        self.phase = phase

    def build_action_space(self, v_pref):
        """Build the discrete action space from the preferred speed.

        Speeds are exponentially spaced in (0, v_pref]; rotations cover the
        full circle for holonomic kinematics or +/- pi/4 otherwise. A stop
        action is always included.
        """
        holonomic = True if self.kinematics == 'holonomic' else False
        #speeds = [0.12885124808584156, 0.2862305178902687, 0.47845399210662953, 0.7132362736976232, 1.0]
        speeds = [(np.exp((i + 1) / self.speed_samples) - 1) /
                  (np.e - 1) * v_pref for i in range(self.speed_samples)]
        if holonomic:
            rotations = np.linspace(
                0, 2 * np.pi, self.rotation_samples, endpoint=False)
        else:
            rotations = np.linspace(-np.pi / 4, np.pi / 4,
                                    self.rotation_samples)

        action_space = [ActionXY(0, 0) if holonomic else ActionRot(0, 0)]
        for rotation, speed in itertools.product(rotations, speeds):
            if holonomic:
                action_space.append(
                    ActionXY(speed * np.cos(rotation), speed * np.sin(rotation)))
            else:
                action_space.append(ActionRot(speed, rotation))

        self.speeds = speeds
        self.rotations = rotations
        self.action_space = action_space

    def reach_destination(self, state):
        """Return True when the agent is within its own radius of the goal.

        Fixed: the original definition was missing ``self`` even though the
        method is invoked as ``self.reach_destination(...)``.
        """
        self_state = state.self_state
        if np.linalg.norm((self_state.py - self_state.gy, self_state.px - self_state.gx)) < self_state.radius:
            return True
        else:
            return False

    def propagate(self, state, action):
        """Linearly propagate a state one time step under the given action."""
        if isinstance(state, ObservableState):
            # propagate state of humans
            next_px = state.px + action.vx * self.time_step
            next_py = state.py + action.vy * self.time_step
            next_state = ObservableState(
                next_px, next_py, action.vx, action.vy, state.radius)
        elif isinstance(state, FullState):
            # propagate state of current agent
            # perform action without rotation
            if self.kinematics == 'holonomic':
                next_px = state.px + action.vx * self.time_step
                next_py = state.py + action.vy * self.time_step
                next_state = FullState(next_px, next_py, action.vx, action.vy,
                                       state.radius, state.gx, state.gy, state.v_pref, state.theta)
            else:
                next_theta = state.theta + action.r
                next_vx = action.v * np.cos(next_theta)
                next_vy = action.v * np.sin(next_theta)
                next_px = state.px + next_vx * self.time_step
                next_py = state.py + next_vy * self.time_step
                next_state = FullState(next_px, next_py, next_vx, next_vy,
                                       state.radius, state.gx, state.gy, state.v_pref, next_theta)
        else:
            raise ValueError('Type error')

        return next_state

    def predict(self, state):
        """Pick the action maximizing reward + discounted next-state value.

        During training an epsilon-greedy random action may be returned
        instead, and the transformed state is cached in ``last_state`` for
        the replay memory.
        """
        def dist(human):
            return np.linalg.norm(np.array(human.position) - np.array(state.self_state.position))
        # Farthest-first so the LSTM sees the closest human last.
        state.human_states = sorted(state.human_states, key=dist, reverse=True)
        if self.phase is None or self.device is None:
            raise AttributeError('Phase, device attributes have to be set!')
        if self.phase == 'train' and self.epsilon is None:
            raise AttributeError(
                'Epsilon attribute has to be set in training phase')

        # Fixed NameError: the original passed the undefined name `states`.
        if self.reach_destination(state):
            return ActionXY(0, 0) if self.kinematics == 'holonomic' else ActionRot(0, 0)
        if self.action_space is None:
            self.build_action_space(state.self_state.v_pref)

        occupancy_maps = None
        probability = np.random.random()
        if self.phase == 'train' and probability < self.epsilon:
            # Epsilon-greedy exploration: uniform random action.
            max_action = self.action_space[np.random.choice(
                len(self.action_space))]
        else:
            self.action_values = list()
            max_value = float('-inf')
            max_action = None
            for action in self.action_space:
                next_self_state = self.propagate(state.self_state, action)
                if self.query_env:
                    next_human_states, reward, done, info = self.env.onestep_lookahead(
                        action)
                else:
                    # Assume each human keeps its current velocity for one step.
                    next_human_states = [self.propagate(human_state, ActionXY(
                        human_state.vx, human_state.vy)) for human_state in state.human_states]
                    reward = self.compute_reward(
                        next_self_state, next_human_states)
                batch_next_states = torch.cat([torch.Tensor([next_self_state + next_human_state]).to(
                    self.device)for next_human_state in next_human_states], dim=0)
                rotated_batch_input = self.rotate(
                    batch_next_states).unsqueeze(0)
                if self.with_om:
                    if occupancy_maps is None:
                        occupancy_maps = self.build_occupancy_maps(
                            next_human_states).unsqueeze(0)
                    rotated_batch_input = torch.cat(
                        [rotated_batch_input, occupancy_maps.to(self.device)], dim=2)
                # VALUE UPDATE: one-step TD target with a v_pref-scaled discount.
                next_state_value = self.model(rotated_batch_input).data.item()
                value = reward + \
                    pow(self.gamma, self.time_step *
                        state.self_state.v_pref) * next_state_value
                self.action_values.append(value)
                if value > max_value:
                    max_value = value
                    max_action = action
            if max_action is None:
                raise ValueError('Value network is not well trained. ')

        if self.phase == 'train':
            self.last_state = self.transform(state)
        # Fixed: use the module's logging instead of a bare print.
        logging.debug('max_action: %s', max_action)
        return max_action

    def rotate(self, state):
        """
        Transform the coordinate to agent-centric.
        Input state tensor is of size (batch_size, state_length)

        """
        # 'px', 'py', 'vx', 'vy', 'radius', 'gx', 'gy', 'v_pref', 'theta', 'px1', 'py1', 'vx1', 'vy1', 'radius1'
        #  0     1      2     3      4        5     6      7         8       9     10      11     12       13
        batch = state.shape[0]
        dx = (state[:, 5] - state[:, 0]).reshape((batch, -1))
        dy = (state[:, 6] - state[:, 1]).reshape((batch, -1))
        # New x-axis points from the agent toward its goal.
        rot = torch.atan2(state[:, 6] - state[:, 1], state[:, 5] - state[:, 0])

        dg = torch.norm(torch.cat([dx, dy], dim=1), 2, dim=1, keepdim=True)
        v_pref = state[:, 7].reshape((batch, -1))
        vx = (state[:, 2] * torch.cos(rot) + state[:, 3]
              * torch.sin(rot)).reshape((batch, -1))
        vy = (state[:, 3] * torch.cos(rot) - state[:, 2]
              * torch.sin(rot)).reshape((batch, -1))

        radius = state[:, 4].reshape((batch, -1))
        # if self.kinematics == 'holonomic':
        theta = torch.zeros_like(v_pref)
        # else:
        #     # set theta to be zero since it's not used
        #     theta = (state[:, 8] - rot).reshape((batch, -1))
        vx1 = (state[:, 11] * torch.cos(rot) + state[:, 12]
               * torch.sin(rot)).reshape((batch, -1))
        vy1 = (state[:, 12] * torch.cos(rot) - state[:, 11]
               * torch.sin(rot)).reshape((batch, -1))
        px1 = (state[:, 9] - state[:, 0]) * torch.cos(rot) + \
            (state[:, 10] - state[:, 1]) * torch.sin(rot)
        px1 = px1.reshape((batch, -1))
        py1 = (state[:, 10] - state[:, 1]) * torch.cos(rot) - \
            (state[:, 9] - state[:, 0]) * torch.sin(rot)
        py1 = py1.reshape((batch, -1))
        radius1 = state[:, 13].reshape((batch, -1))
        radius_sum = radius + radius1
        da = torch.norm(torch.cat([(state[:, 0] - state[:, 9]).reshape((batch, -1)), (state[:, 1] - state[:, 10]).
                                  reshape((batch, -1))], dim=1), 2, dim=1, keepdim=True)
        new_state = torch.cat([dg, v_pref, theta, radius, vx, vy,
                              px1, py1, vx1, vy1, radius1, da, radius_sum], dim=1)
        return new_state

    def compute_reward(self, nav, humans):
        """Reward for a propagated robot state given propagated human states.

        +1 for reaching the goal, -0.25 for collision, a small penalty for
        getting closer than 0.2 m to any human, else 0.
        """
        # collision detection
        dmin = float('inf')
        collision = False
        for i, human in enumerate(humans):
            dist = np.linalg.norm(
                (nav.px - human.px, nav.py - human.py)) - nav.radius - human.radius
            if dist < 0:
                collision = True
                break
            if dist < dmin:
                dmin = dist

        # check if reaching the goal
        reaching_goal = np.linalg.norm(
            (nav.px - nav.gx, nav.py - nav.gy)) < nav.radius
        if collision:
            reward = -0.25
        elif reaching_goal:
            reward = 1
        elif dmin < 0.2:
            reward = (dmin - 0.2) * 0.5 * self.time_step
        else:
            reward = 0

        return reward

    def transform(self, state):
        """
        Take the state passed from agent and transform it to the input of value network

        :param state:
        :return: tensor of shape (# of humans, len(state))
        """
        state_tensor = torch.cat([torch.Tensor([state.self_state + human_state]).to(self.device)
                                  for human_state in state.human_states], dim=0)
        if self.with_om:
            occupancy_maps = self.build_occupancy_maps(state.human_states)
            state_tensor = torch.cat(
                [self.rotate(state_tensor), occupancy_maps.to(self.device)], dim=1)
        else:
            state_tensor = self.rotate(state_tensor)
        return state_tensor

    def build_occupancy_maps(self, human_states):
        """Build per-human local occupancy maps of the other humans.

        NOTE(review): relies on ``self.cell_num``, ``self.cell_size`` and
        ``self.om_channel_size`` which are never set in ``__init__``; this is
        only reachable when ``with_om`` is enabled, so those attributes must
        be assigned externally first — verify before enabling occupancy maps.

        :param human_states:
        :return: tensor of shape (# human - 1, self.cell_num ** 2)
        """
        occupancy_maps = []
        for human in human_states:
            other_humans = np.concatenate([np.array([(other_human.px, other_human.py, other_human.vx, other_human.vy)])
                                           for other_human in human_states if other_human != human], axis=0)
            other_px = other_humans[:, 0] - human.px
            other_py = other_humans[:, 1] - human.py
            # new x-axis is in the direction of human's velocity
            human_velocity_angle = np.arctan2(human.vy, human.vx)
            other_human_orientation = np.arctan2(other_py, other_px)
            rotation = other_human_orientation - human_velocity_angle
            distance = np.linalg.norm([other_px, other_py], axis=0)
            other_px = np.cos(rotation) * distance
            other_py = np.sin(rotation) * distance

            # compute indices of humans in the grid
            other_x_index = np.floor(
                other_px / self.cell_size + self.cell_num / 2)
            other_y_index = np.floor(
                other_py / self.cell_size + self.cell_num / 2)
            # Out-of-grid humans are mapped to -inf so np.isin ignores them.
            other_x_index[other_x_index < 0] = float('-inf')
            other_x_index[other_x_index >= self.cell_num] = float('-inf')
            other_y_index[other_y_index < 0] = float('-inf')
            other_y_index[other_y_index >= self.cell_num] = float('-inf')
            grid_indices = self.cell_num * other_y_index + other_x_index
            occupancy_map = np.isin(range(self.cell_num ** 2), grid_indices)
            if self.om_channel_size == 1:
                occupancy_maps.append([occupancy_map.astype(int)])
            else:
                # calculate relative velocity for other agents
                other_human_velocity_angles = np.arctan2(
                    other_humans[:, 3], other_humans[:, 2])
                rotation = other_human_velocity_angles - human_velocity_angle
                speed = np.linalg.norm(other_humans[:, 2:4], axis=1)
                other_vx = np.cos(rotation) * speed
                other_vy = np.sin(rotation) * speed
                dm = [list()
                      for _ in range(self.cell_num ** 2 * self.om_channel_size)]
                for i, index in np.ndenumerate(grid_indices):
                    if index in range(self.cell_num ** 2):
                        if self.om_channel_size == 2:
                            dm[2 * int(index)].append(other_vx[i])
                            dm[2 * int(index) + 1].append(other_vy[i])
                        elif self.om_channel_size == 3:
                            dm[3 * int(index)].append(1)
                            dm[3 * int(index) + 1].append(other_vx[i])
                            dm[3 * int(index) + 2].append(other_vy[i])
                        else:
                            raise NotImplementedError
                for i, cell in enumerate(dm):
                    # Average the contributions falling into each cell/channel.
                    dm[i] = sum(dm[i]) / len(dm[i]) if len(dm[i]) != 0 else 0
                occupancy_maps.append([dm])

        return torch.from_numpy(np.concatenate(occupancy_maps, axis=0)).float()


class ReplayMemory(Dataset):
    """Fixed-capacity ring buffer of experience tuples, usable as a torch Dataset."""

    def __init__(self, capacity):
        self.capacity = capacity   # maximum number of stored items
        self.memory = []           # backing storage
        self.position = 0          # next write slot

    def push(self, item):
        """Store item, overwriting the oldest entry once the buffer is full."""
        if self.position < len(self.memory):
            self.memory[self.position] = item
        else:
            self.memory.append(item)
        self.position = (self.position + 1) % self.capacity

    def is_full(self):
        """True once capacity items have been stored."""
        return len(self.memory) == self.capacity

    def __getitem__(self, item):
        return self.memory[item]

    def __len__(self):
        return len(self.memory)

    def clear(self):
        """Drop all stored experience (write position is left unchanged)."""
        self.memory = []


class Robot(object):
    """Navigating agent with either holonomic or unicycle kinematics.

    Holds pose, velocity and goal, and delegates action selection to an
    attached policy.
    """

    def __init__(self):
        self.v_pref = 1
        self.radius = 1
        self.kinematics = 'holonomic'
        self.policy = None
        self.px = None
        self.py = None
        self.gx = None
        self.gy = None
        self.vx = None
        self.vy = None
        self.theta = None
        self.time_step = 0.25

    def set_policy(self, policy):
        self.policy = policy

    def sample_random_attributes(self):
        """Randomize preferred speed and radius within fixed ranges."""
        self.v_pref = np.random.uniform(0.5, 1.5)
        self.radius = np.random.uniform(0.3, 0.5)

    def set(self, px, py, gx, gy, vx, vy, theta, radius=None, v_pref=None):
        """Set pose, goal and velocity; radius/v_pref only when provided."""
        self.px, self.py = px, py
        self.gx, self.gy = gx, gy
        self.vx, self.vy = vx, vy
        self.theta = theta
        if radius is not None:
            self.radius = radius
        if v_pref is not None:
            self.v_pref = v_pref

    def get_observable_state(self):
        return ObservableState(self.px, self.py, self.vx, self.vy, self.radius)

    def get_next_observable_state(self, action):
        """Observable state after applying action for one time step (no mutation)."""
        self.check_validity(action)
        next_px, next_py = self.compute_position(action, self.time_step)
        if self.kinematics == 'holonomic':
            next_vx, next_vy = action.vx, action.vy
        else:
            heading = self.theta + action.r
            next_vx = action.v * np.cos(heading)
            next_vy = action.v * np.sin(heading)
        return ObservableState(next_px, next_py, next_vx, next_vy, self.radius)

    def get_full_state(self):
        return FullState(self.px, self.py, self.vx, self.vy, self.radius, self.gx, self.gy, self.v_pref, self.theta)

    def get_position(self):
        return self.px, self.py

    def set_position(self, position):
        self.px, self.py = position[0], position[1]

    def get_goal_position(self):
        return self.gx, self.gy

    def get_velocity(self):
        return self.vx, self.vy

    def set_velocity(self, velocity):
        self.vx, self.vy = velocity[0], velocity[1]

    def act(self, ob):
        """Ask the attached policy for an action given the observation ob."""
        if self.policy is None:
            raise AttributeError('Policy attribute has to be set!')
        return self.policy.predict(JointState(self.get_full_state(), ob))

    def check_validity(self, action):
        """Assert the action type matches the kinematic model."""
        expected = ActionXY if self.kinematics == 'holonomic' else ActionRot
        assert isinstance(action, expected)

    def compute_position(self, action, delta_t):
        """Position reached after delta_t under action (agent is not mutated)."""
        self.check_validity(action)
        if self.kinematics == 'holonomic':
            return self.px + action.vx * delta_t, self.py + action.vy * delta_t
        heading = self.theta + action.r
        return (self.px + np.cos(heading) * action.v * delta_t,
                self.py + np.sin(heading) * action.v * delta_t)

    def step(self, action):
        """Advance the agent one time step, updating pose and velocity."""
        self.check_validity(action)
        self.px, self.py = self.compute_position(action, self.time_step)
        if self.kinematics == 'holonomic':
            self.vx, self.vy = action.vx, action.vy
        else:
            self.theta = (self.theta + action.r) % (2 * np.pi)
            self.vx = action.v * np.cos(self.theta)
            self.vy = action.v * np.sin(self.theta)

    def reached_destination(self):
        """True when the agent is within its radius of the goal."""
        return norm(np.array(self.get_position()) - np.array(self.get_goal_position())) < self.radius


class Trainer(object):
    """Supervised trainer for a policy's value network.

    Fits the model to (state, value) pairs stored in a replay memory using
    SGD with momentum and an MSE objective.
    """

    def __init__(self, model, memory, device, batch_size):
        self.model = model
        self.device = device
        self.criterion = nn.MSELoss().to(device)
        self.memory = memory
        self.data_loader = None   # created lazily on first optimize call
        self.batch_size = batch_size
        self.optimizer = None     # created by set_learning_rate

    def set_learning_rate(self, learning_rate):
        """(Re)create the SGD optimizer with the given learning rate."""
        logging.info('Current learning rate: %f', learning_rate)
        self.optimizer = optim.SGD(
            self.model.parameters(), lr=learning_rate, momentum=0.9)

    def _ensure_ready(self):
        # Guard: the optimizer must exist, and the loader is built on demand.
        if self.optimizer is None:
            raise ValueError('Learning rate is not set!')
        if self.data_loader is None:
            self.data_loader = DataLoader(
                self.memory, self.batch_size, shuffle=True)

    def _run_batch(self, inputs, values):
        # One SGD step on a single mini-batch; returns the scalar loss.
        inputs, values = Variable(inputs), Variable(values)
        self.optimizer.zero_grad()
        loss = self.criterion(self.model(inputs), values)
        loss.backward()
        self.optimizer.step()
        return loss.data.item()

    def optimize_epoch(self, num_epochs):
        """Run num_epochs full passes over the memory.

        :return: summed batch loss of the last epoch divided by memory size
        """
        self._ensure_ready()
        average_epoch_loss = 0
        for epoch in range(num_epochs):
            epoch_loss = sum(self._run_batch(inputs, values)
                             for inputs, values in self.data_loader)
            average_epoch_loss = epoch_loss / len(self.memory)
            logging.debug('Average loss in epoch %d: %.2E',
                          epoch, average_epoch_loss)
        return average_epoch_loss

    def optimize_batch(self, num_batches):
        """Run num_batches single-batch updates (a fresh shuffled batch each time).

        :return: mean loss over the batches
        """
        self._ensure_ready()
        losses = sum(self._run_batch(*next(iter(self.data_loader)))
                     for _ in range(num_batches))
        average_loss = losses / num_batches
        logging.debug('Average loss : %.2E', average_loss)
        return average_loss


class Explorer(object):
    """Runs episodes with the robot's policy, logs statistics, and pushes
    (state, value) training pairs into the replay memory."""

    def __init__(self, env, robot, device, memory=None, gamma=None, target_policy=None):
        self.env = env
        self.robot = robot
        self.device = device
        self.memory = memory
        self.gamma = gamma
        # Policy whose transform() is used when building IL training samples.
        self.target_policy = target_policy
        self.target_model = None

    def update_target_model(self, target_model):
        # Frozen snapshot of the value network used for TD targets below.
        self.target_model = copy.deepcopy(target_model)

    # @profile
    def run_k_episodes(self, k, phase, update_memory=False, imitation_learning=False, episode=None, print_failure=False):
        """Run k episodes and log success/collision/timeout statistics.

        The environment's step() returns an integer flag; from the branches
        below it appears to encode 4 = too close to a human (mid-episode),
        3 = success, 2 = collision, 1 = timeout — TODO confirm against the
        Env implementation.
        """
        self.robot.policy.set_phase(phase)
        success_times = []
        collision_times = []
        timeout_times = []
        success = 0
        collision = 0
        timeout = 0
        too_close = 0
        min_dist = []
        cumulative_rewards = []
        collision_cases = []
        timeout_cases = []
        for i in range(k):
            ob = self.env.reset()
            done = False
            states = []
            actions = []
            rewards = []
            while not done:
                action = self.robot.act(ob)
                ob, reward, done, flag = self.env.step(action)
                # last_state is the policy's transformed view of the state it
                # just acted on (set inside policy.predict during training).
                states.append(self.robot.policy.last_state)
                actions.append(action)
                rewards.append(reward)

                if flag == 4:
                    too_close += 1

            # Classify the terminal flag of the finished episode.
            if flag == 3:
                success += 1
                success_times.append(self.env.global_time)
            elif flag == 2:
                collision += 1
                collision_cases.append(i)
                collision_times.append(self.env.global_time)
            elif flag == 1:
                timeout += 1
                timeout_cases.append(i)
                timeout_times.append(self.env.time_limit)
            else:
                raise ValueError('Invalid end signal from environment')

            if update_memory:
                # NOTE(review): the comment below says success OR collision,
                # but the condition tests flags 3 (success) and 4 (too close);
                # flag 4 cannot hold here since the loop only exits when done,
                # so collision episodes (flag 2) are never stored — verify
                # whether `flag == 2` was intended.
                if flag == 3 or flag == 4:
                    # only add positive(success) or negative(collision) experience in experience set
                    self.update_memory(
                        states, actions, rewards, imitation_learning)

            # Discount is scaled by time step and preferred speed, matching
            # the value definition used in update_memory.
            cumulative_rewards.append(sum([pow(self.gamma, t * self.robot.time_step * self.robot.v_pref)
                                           * reward for t, reward in enumerate(rewards)]))

        success_rate = success / k
        collision_rate = collision / k
        assert success + collision + timeout == k
        avg_nav_time = sum(
            success_times) / len(success_times) if success_times else self.env.time_limit

        extra_info = '' if episode is None else 'in episode {} '.format(
            episode)
        logging.info('{:<5} {}has success rate: {:.2f}, collision rate: {:.2f}, nav time: {:.2f}, total reward: {:.4f}'.
                     format(phase.upper(), extra_info, success_rate, collision_rate, avg_nav_time,
                            average(cumulative_rewards)))

        if print_failure:
            logging.info('Collision cases: ' +
                         ' '.join([str(x) for x in collision_cases]))
            logging.info('Timeout cases: ' +
                         ' '.join([str(x) for x in timeout_cases]))

    def update_memory(self, states, actions, rewards, imitation_learning=False):
        """Convert an episode into (state, value) pairs and push them to memory.

        IL: value = discounted sum of future rewards. RL: one-step TD target
        computed with the frozen target model.
        """
        if self.memory is None or self.gamma is None:
            raise ValueError('Memory or gamma value is not set!')

        for i, state in enumerate(states):
            reward = rewards[i]

            # VALUE UPDATE
            if imitation_learning:
                # define the value of states in IL as cumulative discounted rewards, which is the same in RL
                state = self.target_policy.transform(state)
                # value = pow(self.gamma, (len(states) - 1 - i) * self.robot.time_step * self.robot.v_pref)
                value = sum([pow(self.gamma, max(t - i, 0) * self.robot.time_step * self.robot.v_pref) * reward
                             * (1 if t >= i else 0) for t, reward in enumerate(rewards)])
            else:
                if i == len(states) - 1:
                    # terminal state
                    value = reward
                else:
                    next_state = states[i + 1]
                    gamma_bar = pow(
                        self.gamma, self.robot.time_step * self.robot.v_pref)
                    value = reward + gamma_bar * \
                        self.target_model(next_state.unsqueeze(0)).data.item()
            value = torch.Tensor([value]).to(self.device)

            # # transform state of different human_num into fixed-size tensor
            # if len(state.size()) == 1:
            #     human_num = 1
            #     feature_size = state.size()[0]
            # else:
            #     human_num, feature_size = state.size()
            # if human_num != 5:
            #     padding = torch.zeros((5 - human_num, feature_size))
            #     state = torch.cat([state, padding])
            self.memory.push((state, value))

def average(input_list):
    """Return the arithmetic mean of input_list, or 0 for an empty list."""
    if not input_list:
        return 0
    return sum(input_list) / len(input_list)


def main():
    """Interactive entry point: ask for a mode and run training or testing.

    Fixed: the original used Python-2-only ``raw_input``, which raises
    NameError on Python 3 (required by the torch APIs used below).
    """
    choice = input('please choice train or test:\n')
    print(choice)
    if choice == "train":  # train
        _run_training()
    elif choice == 'test':  # test
        _run_testing()
    else:
        print('error input')


def _run_training():
    """Imitation-learning pretraining followed by RL fine-tuning of the policy."""
    # configure output paths, optionally wiping a previous run
    make_new_dir = True
    if os.path.exists('data/output'):
        key = input(
            'Output directory already exists! Overwrite the folder? (y/n)')
        if key == 'y':
            shutil.rmtree('data/output')
        else:
            make_new_dir = False
    if make_new_dir:
        os.makedirs('data/output')
    log_file = os.path.join('data/output', 'output.log')
    il_weight_file = os.path.join('data/output', 'il_model')
    rl_weight_file = os.path.join('data/output', 'rl_model')
    file_handler = logging.FileHandler(log_file, mode='a')
    stdout_handler = logging.StreamHandler(sys.stdout)
    logging.basicConfig(level=logging.INFO, handlers=[stdout_handler, file_handler],
                        format='%(asctime)s, %(levelname)s: %(message)s', datefmt="%Y-%m-%d %H:%M:%S")

    # policy
    policy = LstmRL()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    policy.set_device(device)

    # environment
    env = train_Env()
    robot = Robot()
    env.set_robot(robot)

    # training hyper-parameters
    rl_learning_rate = 0.001
    train_batches = 100
    train_episodes = 5000
    sample_episodes = 2
    target_update_interval = 50
    evaluation_interval = 1000
    capacity = 10000
    epsilon_start = 0.5
    epsilon_end = 0.1
    epsilon_decay = 2000
    checkpoint_interval = 500

    # trainer and explorer
    memory = ReplayMemory(capacity)
    model = policy.get_model()
    batch_size = 100
    trainer = Trainer(model, memory, device, batch_size)
    explorer = Explorer(env, robot, device, memory,
                        policy.gamma, target_policy=policy)

    # imitation learning: reuse previously saved IL weights when available
    if os.path.exists(il_weight_file):
        model.load_state_dict(torch.load(il_weight_file))
    else:
        il_episodes = 3000
        il_epochs = 50
        il_learning_rate = 0.01
        trainer.set_learning_rate(il_learning_rate)
        il_policy = ORCA()
        il_policy.safety_space = 0
        robot.set_policy(il_policy)
        explorer.run_k_episodes(
            il_episodes, 'train', update_memory=True, imitation_learning=True)
        trainer.optimize_epoch(il_epochs)
        # legacy (non-zip) serialization keeps the checkpoint loadable by
        # older torch versions
        torch.save(model.state_dict(), il_weight_file,
                   _use_new_zipfile_serialization=False)
        logging.info('Finish imitation learning. Weights saved.')
        logging.info('Experience set size: %d/%d',
                     len(memory), memory.capacity)
    explorer.update_target_model(model)

    # reinforcement learning
    policy.set_env(env)
    robot.set_policy(policy)
    trainer.set_learning_rate(rl_learning_rate)
    episode = 0
    while episode < train_episodes:
        # linear epsilon decay from epsilon_start down to epsilon_end
        if episode < epsilon_decay:
            epsilon = epsilon_start + \
                (epsilon_end - epsilon_start) / epsilon_decay * episode
        else:
            epsilon = epsilon_end
        robot.policy.set_epsilon(epsilon)
        # periodic evaluation on the validation phase
        if episode % evaluation_interval == 0:
            explorer.run_k_episodes(100, 'val', episode=episode)
        # sample k episodes into memory and optimize over the generated memory
        explorer.run_k_episodes(
            sample_episodes, 'train', update_memory=True, episode=episode)
        trainer.optimize_batch(train_batches)
        episode += 1
        if episode % target_update_interval == 0:
            explorer.update_target_model(model)
        if episode != 0 and episode % checkpoint_interval == 0:
            torch.save(model.state_dict(), rl_weight_file,
                       _use_new_zipfile_serialization=False)
    # final test
    explorer.run_k_episodes(200, 'test', episode=episode)


def _run_testing():
    """Load the best available weights and evaluate, optionally stepping one episode live."""
    il = False
    if il:
        model_weights = os.path.join('data/output', 'il_model')
    elif os.path.exists(os.path.join('data/output', 'resumed_rl_model')):
        model_weights = os.path.join('data/output', 'resumed_rl_model')
    else:
        model_weights = os.path.join('data/output', 'rl_model')
    # configure logging and device
    logging.basicConfig(level=logging.INFO, format='%(asctime)s, %(levelname)s: %(message)s',
                        datefmt="%Y-%m-%d %H:%M:%S")
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    logging.info('Using device: %s', device)

    # configure policy
    policy = LstmRL()
    policy.get_model().load_state_dict(torch.load(model_weights))

    # configure environment
    env = test_Env()
    robot = Robot()
    robot.set_policy(policy)
    env.set_robot(robot)
    explorer = Explorer(env, robot, device, gamma=0.9)
    policy.set_device(device)
    policy.set_env(env)
    visualize = input('visualize is yes or no:(y/n)')
    if visualize == 'y':
        ob = env.reset()
        done = False
        # NOTE(review): info must start at 3 for the loop below to run at all;
        # the original initialized it to 0, so `while info == 3` exited
        # immediately and the episode was never stepped. Presumably 3 is the
        # "still running" status code — confirm against env.step().
        info = 3
        last_pos = np.array(robot.get_position())
        while info == 3:
            action = robot.act(ob)
            ob, _, done, info = env.step(action)
            if done and info == 2:
                done = False
                ob = env.reset()
            current_pos = np.array(robot.get_position())
            logging.debug('Speed: %.2f', np.linalg.norm(
                current_pos - last_pos) / robot.time_step)
            last_pos = current_pos

        logging.info(
            'It takes %.2f seconds to finish. Final status is %s', env.global_time, info)
    else:
        explorer.run_k_episodes(200, 'test', print_failure=True)


# Run the interactive entry point only when executed as a script, not on import.
if __name__ == '__main__':
    main()