# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""RL training with an environment running entirely on an accelerator."""

import os
import uuid

from absl import app
from absl import flags
from clu import metric_writers
import jax
from brax import envs
#from fetch import Fetch
from brax.envs.fetch import Fetch
from brax.io import html
from brax.io import model
import my_algo
from robotisgp import Robotisgp
from brax.envs import wrappers
import functools

FLAGS = flags.FLAGS

# Common training-loop hyperparameters (shared across algorithms).
flags.DEFINE_integer('total_env_steps', 50000000,
                     'Number of env steps to run training for.')
flags.DEFINE_integer('eval_frequency', 10, 'How many times to run an eval.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_integer('num_envs', 4, 'Number of envs to run in parallel.')
flags.DEFINE_integer('action_repeat', 1, 'Action repeat.')
flags.DEFINE_integer('unroll_length', 30, 'Unroll length.')
flags.DEFINE_integer('batch_size', 4, 'Batch size.')
flags.DEFINE_integer('num_minibatches', 1,
                     'Number of minibatches per update epoch.')
flags.DEFINE_integer('num_update_epochs', 1,
                     'Number of times to reuse each transition for gradient '
                     'computation.')
flags.DEFINE_float('reward_scaling', 10.0, 'Reward scale.')
flags.DEFINE_float('entropy_cost', 3e-4, 'Entropy cost.')
flags.DEFINE_integer('episode_length', 1000, 'Episode length.')
flags.DEFINE_float('discounting', 0.99, 'Discounting.')
flags.DEFINE_float('learning_rate', 5e-4, 'Learning rate.')
flags.DEFINE_float('max_gradient_norm', 1e9,
                   'Maximal norm of a gradient update.')
flags.DEFINE_string('logdir', '', 'Logdir.')
flags.DEFINE_bool('normalize_observations', True,
                  'Whether to apply observation normalization.')
flags.DEFINE_integer('max_devices_per_host', None,
                     'Maximum number of devices to use per host. If None, '
                     'defaults to use as much as it can.')
# Evolution Strategy related flags
flags.DEFINE_integer('population_size', 1,
                     'Number of environments in ES. The actual number is 2x '
                     'larger (used for antithetic sampling).')
flags.DEFINE_float('perturbation_std', 0.1,
                   'Std of a random noise added by ES.')
flags.DEFINE_integer('fitness_shaping', 0,
                     'Defines a type of fitness shaping to apply. '
                     'Just check the code in es to figure out what '
                     'numbers mean.')
flags.DEFINE_bool('center_fitness', False,
                  'Whether to normalize fitness after the shaping.')
flags.DEFINE_integer('fitness_episode_length', 1000,
                     'Episode length to be used for fitness computation.')
flags.DEFINE_float('l2coeff', 0,
                   'L2 regularization coefficient for model params.')
# SAC hps.
flags.DEFINE_integer('min_replay_size', 8192,
                     'Minimal replay buffer size before the training starts.')
flags.DEFINE_integer('max_replay_size', 1048576, 'Maximal replay buffer size.')
flags.DEFINE_float('grad_updates_per_step', 1.0,
                   'How many SAC gradient updates to run per one step in the '
                   'environment.')

def create(env_class,
           episode_length = 1000,
           action_repeat = 1,
           auto_reset = True,
           batch_size = None,
           **kwargs):
  """Instantiates `env_class` and applies the standard Brax env wrappers.

  Args:
    env_class: environment constructor; called with `**kwargs`.
    episode_length: if not None, wrap with EpisodeWrapper to cap episodes.
    action_repeat: action repeat passed to EpisodeWrapper.
    auto_reset: if True, wrap with AutoResetWrapper.
    batch_size: if truthy, wrap with VectorWrapper for batched rollouts.
    **kwargs: forwarded verbatim to `env_class`.

  Returns:
    The (possibly wrapped) environment instance.
  """
  wrapped = env_class(**kwargs)
  # Wrapper order matters: episode capping, then vectorization, then
  # auto-reset (outermost).
  if episode_length is not None:
    wrapped = wrappers.EpisodeWrapper(wrapped, episode_length, action_repeat)
  if batch_size:
    wrapped = wrappers.VectorWrapper(wrapped, batch_size)
  if auto_reset:
    wrapped = wrappers.AutoResetWrapper(wrapped)
  return wrapped  # type: ignore


def create_fn(env, **kwargs):
  """Returns a factory that builds a wrapped `env` via `create` on each call.

  Mirrors `functools.partial(create, env, **kwargs)`: positional arguments
  pass through, and keyword arguments supplied at call time override the
  ones captured here.
  """
  def _factory(*args, **overrides):
    merged = {**kwargs, **overrides}
    return create(env, *args, **merged)
  return _factory

def main(unused_argv):
  """Trains a policy on Fetch, saves params, and renders one eval episode."""
  env_fn = create_fn(Fetch)
  writer = metric_writers.create_default_writer(FLAGS.logdir)
  writer.write_hparams({'log_frequency': FLAGS.eval_frequency,
                        'num_envs': FLAGS.num_envs,
                        'total_env_steps': FLAGS.total_env_steps})

  with metric_writers.ensure_flushes(writer):
    inference_fn, params, _ = my_algo.train(
        environment_fn=env_fn,
        num_envs=FLAGS.num_envs,
        max_devices_per_host=FLAGS.max_devices_per_host,
        action_repeat=FLAGS.action_repeat,
        normalize_observations=FLAGS.normalize_observations,
        num_timesteps=FLAGS.total_env_steps,
        log_frequency=FLAGS.eval_frequency,
        batch_size=FLAGS.batch_size,
        unroll_length=FLAGS.unroll_length,
        num_minibatches=FLAGS.num_minibatches,
        num_update_epochs=FLAGS.num_update_epochs,
        learning_rate=FLAGS.learning_rate,
        entropy_cost=FLAGS.entropy_cost,
        discounting=FLAGS.discounting,
        seed=FLAGS.seed,
        reward_scaling=FLAGS.reward_scaling,
        episode_length=FLAGS.episode_length,
        progress_fn=writer.write_scalars)

  # Single (unbatched, no auto-reset batch) env for the evaluation rollout.
  env = env_fn()
  state = env.reset(jax.random.PRNGKey(FLAGS.seed))

  # Save to flax serialized checkpoint.
  filename = 'Fetch_my.flax'  # was an f-string with no placeholders (F541)
  path = os.path.join(FLAGS.logdir, filename)
  model.save_params(path, params)

  # Output an episode trajectory: roll out the trained policy, recording the
  # qp (physics state) of every frame.
  qps = []
  jit_inference_fn = jax.jit(inference_fn)
  jit_step_fn = jax.jit(env.step)
  rng = jax.random.PRNGKey(FLAGS.seed)
  while not state.done:
    qps.append(state.qp)
    tmp_key, rng = jax.random.split(rng)
    act = jit_inference_fn(params, state.obs, tmp_key)
    state = jit_step_fn(state, act)
  # Include the terminal frame, which the loop above never appends.
  qps.append(state.qp)

  # os.path.join keeps this consistent with the checkpoint path above and
  # avoids producing an absolute '/trajectory_*.html' when logdir is ''.
  html_path = os.path.join(FLAGS.logdir, f'trajectory_{uuid.uuid4()}.html')
  html.save_html(html_path, env.sys, qps)



# absl parses command-line flags into FLAGS before dispatching to main().
if __name__ == '__main__':
  app.run(main)
