#!/usr/bin/env python
# coding=utf-8
'''
@Author: John
@Email: johnjim0816@gmail.com
@Date: 2020-06-11 20:58:21
@LastEditor: John
@LastEditTime: 2022-07-21 21:51:34
@Description: DDPG training entry point for the turtlebot3 OpenAI-ROS task
@Environment: python 3.7.7
'''
import sys,os
from xml.dom.expatbuilder import ParseEscape
import numpy as np
import time 
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from torchvision import transforms
import PIL
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
# curr_path = os.path.dirname(os.path.abspath(__file__))  # current path
# parent_path = os.path.dirname(curr_path)  # parent path
# sys.path.append(parent_path)  # add to system path

import datetime
from env import NormalizedActions,OUNoise
from ddpg import DDPG

def process_obs(data, crop=(300, 1550), size=(78, 78)):
    """Crop, resize and reorder a raw HxWxC camera frame into CHW layout.

    Args:
        data: raw observation as an H x W x C ``np.ndarray`` image.
        crop: (start, stop) column range kept from the original frame
              (defaults match the original hard-coded 300:1550 crop).
        size: (height, width) the cropped image is resized to.

    Returns:
        ``np.ndarray`` of shape (C, height, width).
    """
    assert type(data) is np.ndarray, \
        "process_obs expected np.ndarray, got " + str(type(data))

    # Drop the left/right borders, then resize to the network input size.
    resize = transforms.Resize(list(size))
    img = resize(PIL.Image.fromarray(data[:, crop[0]:crop[1], :]))

    img = np.array(img)

    # HWC -> CHW, the channel-first layout expected downstream.
    return np.transpose(img, (2, 0, 1))

def get_args():
    """Load hyperparameters from the ROS parameter server.

    Every value is read from the ``/turtlebot3/<name>`` namespace and
    returned in a plain dict keyed by the hyperparameter name.
    """
    param_names = (
        'env_name', 'pkg_name', 'algo_name', 'train_eps', 'nsteps',
        'test_eps', 'gamma', 'critic_lr', 'actor_lr', 'memory_capacity',
        'batch_size', 'target_update', 'soft_tau', 'hidden_dim', 'device',
    )
    return {name: rospy.get_param('/turtlebot3/' + name) for name in param_names}

def env_agent_config(cfg, seed=1):
    """Build the OpenAI-ROS gym environment and a DDPG agent.

    Args:
        cfg: hyperparameter dict produced by ``get_args`` (uses 'env_name').
        seed: random seed — currently unused, kept for interface parity.

    Returns:
        (env, agent) tuple.
    """
    rospy.init_node('turtlebot3_world_qlearn', anonymous=True)
    env = StartOpenAI_ROS_Environment(cfg['env_name'])

    rospy.logerr("Gym environment done && Starting Learning")

    # Wrap the raw env so agent actions are rescaled to its native range.
    env = NormalizedActions(env)

    state_shape = tuple(env.observation_space.shape)
    action_dim = env.action_space.shape[0]

    agent = DDPG(cfg, state_shape, action_dim)

    return env, agent

def train(cfg, env, agent):
    """Run the DDPG training loop.

    Args:
        cfg: hyperparameter dict (uses 'train_eps' and 'nsteps').
        env: gym-style environment exposing reset()/step().
        agent: DDPG agent exposing choose_action/update and a replay memory.

    Returns:
        dict with per-episode 'rewards' and moving-average 'ma_rewards'.
    """
    print('Start training!')
    highest_reward = 0
    start_time = time.time()

    ou_noise = OUNoise(env.action_space)  # exploration noise on actions

    rewards = []     # per-episode returns
    ma_rewards = []  # moving-average returns

    for i_ep in range(cfg['train_eps']):
        state = process_obs(env.reset())  # CHW image, see process_obs
        ou_noise.reset()
        rospy.logwarn("---------------episode: " + str(i_ep) + "-----------------")
        done = False
        ep_reward = 0
        total_step = 0  # steps actually taken this episode (fix: was unbound if nsteps == 0)

        for i_step in range(cfg['nsteps']):
            rospy.loginfo("------------step: " + str(i_step) + "---------------")
            action = agent.choose_action(state)               # np.float32
            action = ou_noise.get_action(action, i_step + 1)  # np.ndarray(1,)

            next_state, reward, done, _ = env.step(action)
            next_state = process_obs(next_state)

            ep_reward += reward

            # state: np.ndarray, action: np.ndarray(1,), reward: float, done: bool
            agent.memory.push(state, action, reward, next_state, done)

            agent.update()

            if highest_reward < ep_reward:
                highest_reward = ep_reward

            # Fix: count steps in this episode; previously logged i_ep + 1
            # ("STEPS" in the log was really the episode index).
            total_step = i_step + 1

            rospy.loginfo("EP: " + str(i_ep) + ' -Step: ' + str(i_step) +
                          " -action: " + str(action) + " -reward: " + str(reward))

            if not done:
                state = next_state
            else:
                break

        if (i_ep + 1) % 10 == 0:
            # Fix: report against the configured episode count, not a hard-coded 200.
            print(f"Env:{i_ep+1}/{cfg['train_eps']}, Reward:{ep_reward:.2f}")
        rewards.append(ep_reward)
        # Exponential moving average — matches test(); previously the returned
        # 'ma_rewards' list was always empty.
        if ma_rewards:
            ma_rewards.append(0.9 * ma_rewards[-1] + 0.1 * ep_reward)
        else:
            ma_rewards.append(ep_reward)
        m, s = divmod(int(time.time() - start_time), 60)
        h, m = divmod(m, 60)
        rospy.logwarn(("EP: " + str(i_ep + 1) + "] - Reward: " + str(ep_reward) +
                       "STEPS: " + str(total_step) + " Time: %d:%02d:%02d" % (h, m, s)))

    print('Finish training!')
    return {'rewards': rewards, 'ma_rewards': ma_rewards}

def test(cfg, env, agent):
    print('Start testing')
    print(f'Env:{cfg.env_name}, Algorithm:{cfg.algo_name}, Device:{cfg.device}')
    rewards = [] # 记录所有回合的奖励
    ma_rewards = []  # 记录所有回合的滑动平均奖励
    for i_ep in range(cfg.test_eps):
        state = env.reset() 
        done = False
        ep_reward = 0
        i_step = 0
        while not done:
            i_step += 1
            action = agent.choose_action(state)
            next_state, reward, done, _ = env.step(action)
            ep_reward += reward
            state = next_state
        rewards.append(ep_reward)
        if ma_rewards:
            ma_rewards.append(0.9*ma_rewards[-1]+0.1*ep_reward)
        else:
            ma_rewards.append(ep_reward)
        print(f"Epside:{i_ep+1}/{cfg.test_eps}, Reward:{ep_reward:.1f}")
    print('Finish testing!')
    return {'rewards':rewards,'ma_rewards':ma_rewards}
if __name__ == "__main__":
    # Entry point: load config, build env/agent, run the training loop.
    cfg = get_args()
    env, agent = env_agent_config(cfg, seed=1)
    res_dic = train(cfg, env, agent)
    # To persist/plot results or run evaluation, re-enable the steps below:
    # make_dir(cfg.result_path, cfg.model_path)
    # save_args(cfg)
    # agent.save(path=cfg.model_path)
    # save_results(res_dic, tag='train',
    #              path=cfg.result_path)
    # plot_rewards(res_dic['rewards'], res_dic['ma_rewards'], cfg, tag="train")
    # env, agent = env_agent_config(cfg, seed=10)
    # agent.load(path=cfg.model_path)
    # res_dic = test(cfg, env, agent)
    # save_results(res_dic, tag='test',
    #              path=cfg.result_path)
    # plot_rewards(res_dic['rewards'], res_dic['ma_rewards'], cfg, tag="test")

