import os
import logging
import sys
import socket
import time

import numpy as np
import torch
import torch.nn as nn

from torch.optim import Adam
from collections import deque

from networks import CNNNetwork

from algs.ppo import ppo_update, generate_train_data
from algs.ppo import generate_action
from algs.ppo import transform_buffer


import uav_gym
from uav_gym.envs.rotodyne import RotodyneEnv
import gym



# --- PPO training hyperparameters -------------------------------------------
MAX_EPISODES = 5000      # total training episodes
LASER_HIST = 1           # number of stacked sensor frames (passed to ppo_update)
HORIZON = 128 #128       # rollout length between PPO updates
GAMMA = 0.99             # discount factor
LAMDA = 0.95             # GAE lambda (sic: "LAMDA" spelling kept for consistency)
BATCH_SIZE = 1024        # minibatch size for each PPO epoch
EPOCH = 2                # PPO epochs per update
COEFF_ENTROPY = 5e-4     # entropy bonus coefficient
CLIP_VALUE = 0.1         # PPO clipping epsilon
NUM_ENV = 1              # number of parallel environments
OBS_SIZE = 512           # observation size (unused in this file — TODO confirm)
ACT_SIZE = 4             # action dimensionality (vx, vy, vz, yaw — presumably; verify)
LEARNING_RATE = 5e-5     # Adam learning rate



def run(env, policy, policy_path, action_bound, optimizer):
    """Run the PPO training loop over MAX_EPISODES episodes.

    Args:
        env: environment exposing ``reset()``, ``get_obs()`` and ``step()``,
            plus ``goal_point``, ``init_pose`` and ``index`` attributes
            (``RotodyneEnv`` here).
        policy: torch policy network; its ``state_dict`` is checkpointed
            every 20 PPO updates under ``policy_path``.
        policy_path: directory where checkpoints are written.
        action_bound: ``[[lows...], [highs...]]`` forwarded to
            ``generate_action``.
        optimizer: torch optimizer passed through to ``ppo_update``.

    NOTE(review): relies on the module-level ``logger`` / ``logger_cal``
    configured in ``__main__`` — must be called from this script.
    """
    buff = []           # rollout transitions accumulated since the last update
    global_update = 0   # completed PPO updates
    global_step = 0     # total env steps across all episodes
    # Defensive default: last_v is normally set at every HORIZON boundary
    # before the buffer fills, but initialize it so a HORIZON/buffer
    # misalignment cannot raise NameError.
    last_v = 0.0

    for episode in range(MAX_EPISODES):  # renamed from `id` (shadowed builtin)

        env.reset()
        terminal = False
        ep_reward = 0
        step = 1

        # Wait until all ROS-backed observation fields have been published.
        # Sleep between polls so we don't busy-spin while ROS comes up.
        state = env.get_obs()
        while (state["depth_image"] is None or state["goal"] is None
               or state["odom_vel"] is None):
            logger.warning('Receive messages from ROS failed, trying again')
            time.sleep(0.1)
            state = env.get_obs()

        while not terminal:

            # Sample action, its log-prob and the value estimate.
            v, a, logprob, action = generate_action(
                state=state, policy=policy, action_bound=action_bound)

            # Execute the action in the environment.
            r, terminal, state_next, status = env.step(action)

            ep_reward += r
            global_step += 1

            # Bootstrap value for GAE at the end of each rollout horizon.
            if global_step % HORIZON == 0:
                last_v, _, _, _ = generate_action(
                    state=state_next, policy=policy, action_bound=action_bound)

            # Store the transition; update the policy once HORIZON
            # transitions have accumulated.
            buff.append((state, a, r, terminal, logprob, v))
            if len(buff) >= HORIZON:
                s_batch, goal_batch, speed_batch, a_batch, r_batch, d_batch, \
                    l_batch, v_batch = transform_buffer(buff=buff)
                t_batch, advs_batch = generate_train_data(
                    rewards=r_batch, gamma=GAMMA, values=v_batch,
                    last_value=last_v, dones=d_batch, lam=LAMDA)
                memory = (s_batch, goal_batch, speed_batch, a_batch, l_batch,
                          t_batch, v_batch, r_batch, advs_batch)
                ppo_update(policy=policy, optimizer=optimizer,
                           batch_size=BATCH_SIZE, memory=memory,
                           epoch=EPOCH, coeff_entropy=COEFF_ENTROPY,
                           clip_value=CLIP_VALUE, num_step=HORIZON,
                           num_env=NUM_ENV, frames=LASER_HIST,
                           act_size=ACT_SIZE)

                buff = []
                global_update += 1

            step += 1
            state = state_next

        # Checkpoint every 20 policy updates (checked once per episode).
        if global_update != 0 and global_update % 20 == 0:
            torch.save(policy.state_dict(),
                       policy_path + '/Stage1_{}'.format(global_update))
            logger.info('########################## model saved when update {} times#########'
                        '################'.format(global_update))

        # Straight-line distance from the episode start pose to the goal.
        distance = np.sqrt((env.goal_point[0] - env.init_pose[0]) ** 2
                           + (env.goal_point[1] - env.init_pose[1]) ** 2)

        # Lazy %-style args avoid formatting cost when the level is disabled.
        logger.info('Env %02d, Goal (%05.1f, %05.1f), Episode %05d, step %03d, Reward %-5.1f, Distance %05.1f, %s',
                    env.index, env.goal_point[0], env.goal_point[1],
                    episode + 1, step, ep_reward, distance, status)
        logger_cal.info(ep_reward)





if __name__ == '__main__':

    # --- Logging setup -------------------------------------------------------
    # Per-host log directory so logs from different machines don't collide.
    hostname = socket.gethostname()
    os.makedirs('./log/' + hostname, exist_ok=True)  # race-safe vs. exists()+makedirs()
    output_file = './log/' + hostname + '/output.log'
    cal_file = './log/' + hostname + '/cal.log'

    # Main logger: timestamped records to file and to stdout.
    logger = logging.getLogger('mylogger')
    logger.setLevel(logging.INFO)

    file_handler = logging.FileHandler(output_file, mode='a')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    logger.addHandler(file_handler)
    logger.addHandler(stdout_handler)

    # Secondary logger: bare per-episode rewards for offline analysis.
    logger_cal = logging.getLogger('loggercal')
    logger_cal.setLevel(logging.INFO)
    cal_f_handler = logging.FileHandler(cal_file, mode='a')
    # BUGFIX: the original set file_handler's level here (copy-paste),
    # leaving cal_f_handler unconfigured.
    cal_f_handler.setLevel(logging.INFO)
    logger_cal.addHandler(cal_f_handler)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # --- Environment ---------------------------------------------------------
    # env = gym.make('uav_gym/Rotodyne-v0')
    env_config = {'frame_skip': 1}
    env = RotodyneEnv(env_config)
    # TODO: derive bounds from the env's action space instead of hard-coding.
    # [lows, highs] for (x, y, z, yaw) — presumably; verify against env.step.
    action_bound = [[-1, -1, -1, -np.pi], [1, 1, 1, np.pi]]

    # torch.manual_seed(1)
    # np.random.seed(1)

    # --- Policy and optimizer ------------------------------------------------
    policy_path = 'policy'
    # policy = MLPPolicy(obs_size, act_size)
    policy = CNNNetwork(frames=1, action_space=4)
    # TODO
    policy.to(device)
    opt = Adam(policy.parameters(), lr=LEARNING_RATE)
    mse = nn.MSELoss()

    os.makedirs(policy_path, exist_ok=True)

    # Resume from a saved policy when one exists; otherwise train from scratch.
    file = policy_path + '/uav_policy.pth'
    if os.path.exists(file):
        logger.info('####################################')
        logger.info('############Loading Model###########')
        logger.info('####################################')
        state_dict = torch.load(file)
        policy.load_state_dict(state_dict)
    else:
        logger.info('#####################################')
        logger.info('############Start Training###########')
        logger.info('#####################################')

    run(env=env, policy=policy, policy_path=policy_path,
        action_bound=action_bound, optimizer=opt)