"""
Copyright: Wenyi Tang 2017-2018
Author: Wenyi Tang
Email: wenyi.tang@intel.com
Created Date: Oct 15th 2018

Extend the pre-Environment module, provide different and extensible
training methodology for SISR, VSR or other image tasks.
"""

#  Copyright (c): Wenyi Tang 2017-2019.
#  Author: Wenyi Tang
#  Email: wenyi.tang@intel.com
#  Update Date: 2019/4/3 下午8:28

import csv
import time
from pathlib import Path

import numpy as np
import tensorflow as tf
import tqdm

from ..Util.Config import Config
from ..Util.Utility import to_list


def _make_ckpt_name(name, scale, step):
  return '{}-sc{}-ep{:04d}.ckpt'.format(name, scale, step)


def _parse_ckpt_name(name):
  # sample name: {model}-sc{scale}-ep{epoch}.ckpt(.index)
  if not name:
    return 0
  model_name, scale, epochs = Path(name).stem.split('.')[0].split('-')
  return int(epochs[2:])


def _ensemble_expand(feature):
  r0 = feature
  r1 = np.rot90(feature, 1, axes=[-3, -2])
  r2 = np.rot90(feature, 2, axes=[-3, -2])
  r3 = np.rot90(feature, 3, axes=[-3, -2])
  r4 = np.flip(feature, axis=-2)
  r5 = np.rot90(r4, 1, axes=[-3, -2])
  r6 = np.rot90(r4, 2, axes=[-3, -2])
  r7 = np.rot90(r4, 3, axes=[-3, -2])
  return r0, r1, r2, r3, r4, r5, r6, r7


def _ensemble_reduce_mean(outputs):
  results = []
  for i in outputs:
    outputs_ensemble = [
      i[0],
      np.rot90(i[1], 3, axes=[-3, -2]),
      np.rot90(i[2], 2, axes=[-3, -2]),
      np.rot90(i[3], 1, axes=[-3, -2]),
      np.flip(i[4], axis=-2),
      np.flip(np.rot90(i[5], 3, axes=[-3, -2]), axis=-2),
      np.flip(np.rot90(i[6], 2, axes=[-3, -2]), axis=-2),
      np.flip(np.rot90(i[7], 1, axes=[-3, -2]), axis=-2),
    ]
    results.append(np.concatenate(outputs_ensemble).mean(axis=0, keepdims=True))
  return results


class Trainer:
  """A pure interface trainer.

     A trainer provides following APIs:
       >>> Trainer.fit
       >>> Trainer.infer
       >>> Trainer.benchmark
       >>> Trainer.export

     Args:
         model: the SR model object. @see SuperResolution
         work_dir: the dir to save training checkpoints and logs
         verbose: tf logging level
     """

  def __init__(self, model, work_dir, verbose=tf.logging.INFO):
    self._m = model
    self._saved = Path(work_dir) / 'save'  # checkpoint directory
    self._logd = Path(work_dir) / 'log'  # summary & CSV log directory
    self._verb = verbose
    self._restored = False
    # only record CSV metrics when logging at INFO or more verbose
    self._csv = verbose <= tf.logging.INFO

  def _startup(self):
    """Prepare log/save directories, the CSV logger and the model graph."""
    tf.logging.set_verbosity(self._verb)
    self._saved.mkdir(parents=True, exist_ok=True)
    self._logd.mkdir(parents=True, exist_ok=True)
    if self._csv:
      # newline='' is required by the csv module, otherwise spurious
      # blank rows appear in the file on Windows
      self._csv_file = open(self._logd / 'train_metrics.csv', 'a',
                            newline='')
      self._csv_writer = csv.writer(self._csv_file)
    if self.model.compiled:
      self.graph = tf.get_default_graph()
    else:
      # compile the model into a fresh graph and remember it
      with tf.Graph().as_default() as g:
        self.model.compile()
        self.graph = g

  def __enter__(self):
    """Create session of tensorflow and build model graph"""

    self._startup()
    conf = tf.ConfigProto(
      allow_soft_placement=True,
      gpu_options=tf.GPUOptions(allow_growth=True))
    sess = tf.Session(graph=self.graph, config=conf)
    sess.__enter__()
    self.savers = self.model.savers
    sess.run(tf.global_variables_initializer())
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Close session and release the CSV log file."""

    sess = tf.get_default_session()
    sess.__exit__(exc_type, exc_val, exc_tb)
    # file.close() is idempotent, so this is safe even if a subclass
    # already closed the file; without it the handle leaked whenever
    # training aborted before the subclass could close it.
    if self._csv and hasattr(self, '_csv_file'):
      self._csv_file.close()

  def _find_last_ckpt(self):
    """Return the path of the newest checkpoint in the save dir, or None."""
    # restore the latest checkpoint in save dir
    # (tf.train expects plain strings, not pathlib.Path)
    ckpt = tf.train.get_checkpoint_state(str(self._saved))
    if ckpt and ckpt.model_checkpoint_path:
      return tf.train.latest_checkpoint(str(self._saved))
    # try another way: glob index files directly
    ckpt = to_list(self._saved.glob('*.ckpt.index'))
    # sort as modification time
    ckpt = sorted(ckpt, key=lambda x: x.stat().st_mtime_ns)
    return self._saved / ckpt[-1].stem if ckpt else None

  def _restore_model(self, sess):
    """Restore every saver's newest checkpoint into `sess`.

    Returns:
        the epoch number parsed from the last restored checkpoint,
        or 0 if nothing could be restored.
    """
    last_checkpoint_step = 0
    for name in self.savers:
      saver = self.savers.get(name)
      ckpt = to_list(self._saved.glob('{}*.index'.format(name)))
      if ckpt:
        # pick the most recently written checkpoint for this saver
        ckpt = sorted(ckpt, key=lambda x: x.stat().st_mtime_ns)
        ckpt = self._saved / ckpt[-1].stem
        try:
          saver.restore(sess, str(ckpt))
        except tf.errors.NotFoundError:
          # best effort: a missing variable set is logged, not fatal
          tf.logging.warning(
            '{} of model {} could not be restored'.format(
              name, self.model.name))
        last_checkpoint_step = _parse_ckpt_name(ckpt)
    return last_checkpoint_step

  def _save_model(self, sess, step):
    """Save every saver's variables, tagged with model scale and epoch."""
    for name in self.savers:
      saver = self.savers.get(name)
      file = self._saved / _make_ckpt_name(name, self.model.scale[0], step)
      saver.save(sess, str(file))

  def _restore(self):
    """Restore model weights into the default session (at most once).

    Raises:
        RuntimeError: if no tensorflow session is active.
    """
    # restore graph
    sess = tf.get_default_session()
    if sess is None:
      raise RuntimeError('No session initialized')
    if self._restored:
      return sess
    self.last_epoch = self._restore_model(sess)
    self._restored = True
    return sess

  def export(self, export_dir='.', freeze_model=False):
    """Export model as protobuf

    Args:
        export_dir: directory to save the exported model
        freeze_model: freeze all trainable variables
    """

    self._restore()
    if freeze_model:
      self.model.export_freeze_model(export_dir)
    else:
      self.model.export_saved_model(export_dir)

  def set_seed(self, seed):
    """Seed both numpy and tensorflow RNGs for reproducibility."""
    np.random.seed(seed)
    tf.set_random_seed(seed)

  def fit(self, *args, **kwargs):
    raise NotImplementedError

  def infer(self, *args, **kwargs):
    raise NotImplementedError

  def benchmark(self, *args, **kwargs):
    raise NotImplementedError

  @property
  def model(self):
    # the wrapped SR model object
    return self._m


class VSR(Trainer):
  """Default trainer for task SISR or VSR"""

  def __init__(self, *args, **kwargs):
    super(VSR, self).__init__(*args, **kwargs)
    # Bag of per-run local variables. Created here as an *instance*
    # attribute: the previous class-level `v = Config()` was shared by
    # every VSR instance, so two trainers in the same process clobbered
    # each other's epoch/loader/metric state.
    self.v = Config()

  """=======================================
      components, sub-functions, helpers
     =======================================
  """

  def query_config(self, config, **kwargs) -> Config:
    """Copy the run parameters from `config` into the local bag `self.v`.

    Args:
        config: an instance of `Util.Config.Config`.
        kwargs: additional arguments to override the same ones in config.

    Returns:
        `self.v`, the updated local-variable bag.
    """
    assert isinstance(config, Config)
    config.update(kwargs)  # override parameters
    self.v.epoch = config.epoch  # current epoch
    self.v.epochs = config.epochs  # total epochs
    self.v.lr = config.lr  # learning rate
    self.v.lr_schedule = config.lr_schedule
    self.v.memory_limit = config.memory_limit
    self.v.feature_callbacks = config.feature_callbacks or []
    self.v.label_callbacks = config.label_callbacks or []
    self.v.output_callbacks = config.output_callbacks or []
    self.v.validate_every_n_epoch = config.validate_every_n_epoch or 1
    self.v.subdir = config.subdir
    self.v.random_val = config.random_val
    self.v.ensemble = config.ensemble
    return self.v

  def fit_init(self) -> bool:
    """Restore the model and decide whether training should proceed.

    Returns:
        False if the restored epoch already reached `epochs`, else True.
    """
    v = self.v
    v.sess = self._restore()
    if self.last_epoch >= v.epochs:
      return False
    tf.logging.info('Fitting: {}'.format(self.model.name.upper()))
    self.model.display()
    v.summary_writer = tf.summary.FileWriter(
      str(self._logd), graph=tf.get_default_graph())
    v.global_step = self.model.global_steps.eval()
    return True

  def fit_close(self):
    """Flush all pending summaries and CSV rows to disk."""
    # flush all pending summaries to disk
    if self.v.summary_writer:
      self.v.summary_writer.close()
    if self._csv:
      self._csv_file.close()

  def fn_train_each_epoch(self):
    """Run one training epoch: iterate batches, log metrics, validate."""
    v = self.v
    mem = v.memory_limit
    train_iter = v.train_loader.make_one_shot_iterator(mem, shuffle=True)
    if hasattr(v.train_loader, 'prefetch'):
      v.train_loader.prefetch(v.memory_limit)
    date = time.strftime('%Y-%m-%d %T', time.localtime())
    v.avg_meas = {}
    # a callable lr_schedule overrides the static learning rate
    if v.lr_schedule and callable(v.lr_schedule):
      v.lr = v.lr_schedule(steps=v.global_step)
    print('| {} | Epoch: {}/{} | LR: {:.2g} |'.format(
      date, v.epoch, v.epochs, v.lr))
    with tqdm.tqdm(train_iter, unit='batch', ascii=True) as r:
      for items in r:
        label, feature, name, post = items[:4]
        self.fn_train_each_step(label, feature, name, post)
        r.set_postfix(v.loss)
    for _k, _v in v.avg_meas.items():
      print('| Epoch average {} = {:.6f} |'.format(_k, np.mean(_v)))
    if self._csv:
      # write the header row only once, while the file is still empty
      if self._csv_file.tell() == 0:
        self._csv_writer.writerow(v.avg_meas.keys())
      self._csv_writer.writerow([np.mean(s) for s in v.avg_meas.values()])
      self._csv_file.flush()
    if v.epoch % v.validate_every_n_epoch == 0:
      self.benchmark(v.val_loader, v, epoch=v.epoch, memory_limit='1GB')
      v.summary_writer.add_summary(self.model.summary(), v.global_step)
      self._save_model(v.sess, v.epoch)

  def fn_train_each_step(self, label=None, feature=None, name=None, post=None):
    """Run one optimization step and accumulate per-epoch loss averages."""
    v = self.v
    for fn in v.feature_callbacks:
      feature = fn(feature, name=name)
    for fn in v.label_callbacks:
      label = fn(label, name=name)
    loss = self.model.train_batch(feature, label, learning_rate=v.lr,
                                  epochs=v.epoch)
    v.global_step = self.model.global_steps.eval()
    for _k, _v in loss.items():
      # append this step's value to the running list for the epoch mean
      v.avg_meas[_k] = \
        v.avg_meas[_k] + [_v] if v.avg_meas.get(_k) else [_v]
      loss[_k] = '{:08.5f}'.format(_v)
    v.loss = loss

  def fn_infer_each_step(self, label=None, feature=None, name=None, post=None):
    """Run the model forward on one batch (optionally self-ensembled)."""
    v = self.v
    origin_feat = feature
    for fn in v.feature_callbacks:
      feature = fn(feature, name=name)
    if v.ensemble:
      # add self-ensemble boosting metric score
      feature_ensemble = _ensemble_expand(feature)
      outputs_ensemble = []
      for f in feature_ensemble:
        y, _ = self.model.test_batch(f, None)
        outputs_ensemble.append(y)
      outputs = []
      # regroup: outputs[i] collects the i-th output across the 8 transforms
      for i in range(len(outputs_ensemble[0])):
        outputs.append([j[i] for j in outputs_ensemble])
      outputs = _ensemble_reduce_mean(outputs)
    else:
      outputs, _ = self.model.test_batch(feature, None)
    for fn in v.output_callbacks:
      outputs = fn(outputs, input=origin_feat, name=name,
                   subdir=v.subdir, mode=v.color_format)

  def fn_benchmark_each_step(self, label=None, feature=None, name=None,
                             post=None):
    """Evaluate one batch and accumulate metrics into `v.mean_metrics`."""
    v = self.v
    origin_feat = feature
    for fn in v.feature_callbacks:
      feature = fn(feature, name=name)
    for fn in v.label_callbacks:
      label = fn(label, name=name)
    outputs, metrics = self.model.test_batch(feature, label, epochs=v.epoch)
    for _k, _v in metrics.items():
      if _k not in v.mean_metrics:
        v.mean_metrics[_k] = []
      v.mean_metrics[_k] += [_v]
    for fn in v.output_callbacks:
      outputs = fn(outputs, input=origin_feat, label=label, name=name,
                   mode=v.color_format, subdir=v.subdir)

  def fn_benchmark_body(self):
    """Iterate the benchmark loader and evaluate every batch."""
    v = self.v
    it = v.loader.make_one_shot_iterator(v.memory_limit, shuffle=v.random_val)
    for items in tqdm.tqdm(it, 'Test', ascii=True):
      label, feature, name, post = items[:4]
      self.fn_benchmark_each_step(label, feature, name, post)

  """=======================================
      Interface: fit, benchmark, infer
     =======================================
  """

  def fit(self, loaders, config, **kwargs):
    """Fit the model.

    Args:
        loaders: a tuple of 2 loaders, the 1st one is used for training,
          and the 2nd one is used for validating.
        config: fitting configuration, an instance of `Util.Config.Config`
        kwargs: additional arguments to override the same ones in config.
    """
    v = self.query_config(config, **kwargs)
    v.train_loader, v.val_loader = loaders
    if not self.fit_init():
      return
    # resume from the epoch after the last restored checkpoint
    for epoch in range(self.last_epoch + 1, v.epochs + 1):
      v.epoch = epoch
      self.fn_train_each_epoch()
    self.fit_close()

  def infer(self, loader, config, **kwargs):
    """Infer SR images.

    Args:
        loader: a loader for enumerating LR images
        config: inferring configuration, an instance of `Util.Config.Config`
        kwargs: additional arguments to override the same ones in config.
    """
    v = self.query_config(config, **kwargs)
    v.color_format = loader.color_format

    self._restore()
    it = loader.make_one_shot_iterator()
    if len(it):
      tf.logging.info('Inferring {} at epoch {}'.format(
        self.model.name, self.last_epoch))
    else:
      return
    # use original images in inferring
    for items in tqdm.tqdm(it, 'Infer', ascii=True):
      feature = items[0]
      name = items[2]
      self.fn_infer_each_step(None, feature, name)

  def benchmark(self, loader, config, **kwargs):
    """Benchmark/validate the model.

    Args:
        loader: a loader for enumerating LR images
        config: benchmark configuration, an instance of `Util.Config.Config`
        kwargs: additional arguments to override the same ones in config.
    """
    v = self.query_config(config, **kwargs)
    v.color_format = loader.color_format

    self._restore()
    v.mean_metrics = {}
    v.loader = loader
    self.fn_benchmark_body()
    for _k, _v in v.mean_metrics.items():
      print('{}: {:.6f}'.format(_k, np.mean(_v)), end=', ')
    print('')
