# Copyright (c) 2019 Presto Labs Pte. Ltd.
# Author: jaewon

import datetime
import functools
import hashlib
import logging
import os
from concurrent.futures import ProcessPoolExecutor, Future

import h5py
import numpy as np
import pandas as pd

from coin.base.datetime_util import to_datetime, to_timestamp_int
from coin.base.param_util import to_list

import native.prophet.py.prophet.fastfeed as native_fastfeed
from coin.feed.fastfeed.constants import DEFAULT_FEED_CACHE_DIR
from coin.feed.fastfeed.util import split_time_range

from experimental.prophet.graph import graph as graph_mod
from experimental.prophet.graph import algo as graph_algo
from experimental.prophet.ops.util import to_duration_int
from experimental.prophet.ops.timer import list_periodic_timers, list_custom_timers
from experimental.prophet.ops.fastfeed import find_product_from_graph


def _on_ping():
  # Do nothing.
  pass


def _find_all_shared_vars(graph):
  shared_vars = []
  for node in graph.nodes:
    if node.is_shared_variable:
      shared_vars.append(node.name)
  return shared_vars


def _query_transitive_rdeps(graph_queryer, node_names):
  """Union the transitive reverse-dependencies of the given node name(s).

  Accepts a single name or a list of names (normalized via to_list) and
  returns the deduplicated nodes sorted by their graph index.
  """
  collected = set()
  for name in to_list(node_names):
    collected.update(graph_queryer.query_transitive_rdeps(name))
  return sorted(collected, key=lambda node: node.idx)


class FastfeedGraphAdapter:
  """Bridges a prophet Graph onto the native fastfeed graph adapter.

  Instantiates the graph, wires the evaluation/ping callbacks into the native
  adapter, precomputes transitive reverse-dependencies for every placeholder
  node (including r-deps induced by shared variables), and registers the
  graph's periodic and custom timers with the native side.
  """

  def __init__(self,
               graph,
               default_machine,
               eval_updated_only=True,
               eval_callback=None,
               ping_callback=None):
    # Args:
    #   graph: prophet graph to run; instantiated here as a side effect.
    #   default_machine: machine name every feed subscription request from the
    #     native adapter is expected to match (see get_feed_sub_requests).
    #   eval_updated_only: forwarded to the native adapter — presumably limits
    #     evaluation to updated nodes; TODO confirm native semantics.
    #   eval_callback: optional; invoked by the native side with this graph
    #     bound as its first argument.
    #   ping_callback: optional; a no-op handler is used when not given.
    self._graph = graph
    self._default_machine = default_machine

    if eval_callback is not None:
      # Bind the graph up front so the native side needs no extra state.
      eval_callback = functools.partial(eval_callback, self._graph)

    graph.instantiate()
    self._adapter = native_fastfeed._FastfeedGraphAdapter()
    self._adapter.init(graph._instance.cc_instance,
                       default_machine,
                       eval_updated_only,
                       eval_callback,
                       ping_callback or _on_ping)

    graph_queryer = graph_algo.build_graph_queryer(graph)

    # Shared variables' transitive r-deps
    additional_rdeps = _query_transitive_rdeps(graph_queryer, _find_all_shared_vars(graph))

    for node in graph.nodes:
      if node.is_placeholder:
        rdeps = graph_queryer.query_transitive_rdeps(node.name)
        # Merge with the shared-variable r-deps, deduplicate, and keep a
        # deterministic order by node index before handing to native code.
        rdeps = sorted(set(rdeps + additional_rdeps), key=lambda n: n.idx)
        self._adapter.init_transitive_rdeps(node.name, [rd.name for rd in rdeps])

    periodic_timers = list_periodic_timers(graph)
    for var_name, period in periodic_timers:
      self._adapter.add_periodic_timer(var_name, period)

    custom_timers = list_custom_timers(graph)
    for var_name, period in custom_timers:
      self._adapter.add_custom_timer(var_name, period)

  @property
  def default_machine(self):
    """Machine name supplied at construction time."""
    return self._default_machine

  @property
  def native_adapter(self):
    """The underlying native _FastfeedGraphAdapter instance."""
    return self._adapter

  def nullify_fastfeed_placeholders(self):
    """Forward to the native adapter to reset its fastfeed placeholders."""
    self._adapter.nullify_fastfeed_placeholders()

  def get_feed_sub_requests(self, feed_sub_req=None, worker_id=None):
    """Translate the native adapter's subscription requests into a
    FeedSubscriptionRequest.

    Asserts every native request targets default_machine and the given
    worker_id. Appends into *feed_sub_req* when provided, otherwise creates a
    fresh FeedSubscriptionRequest; returns it either way.
    """
    # Local import — presumably to avoid a module-level dependency cycle with
    # the strategy package; TODO confirm.
    from coin.strategy.mm.subscription import SubscriptionRequest, FeedSubscriptionRequest
    feed_sub_req = feed_sub_req or FeedSubscriptionRequest()
    for req in self._adapter.fastfeed_sub_reqs:
      assert req.machine == self.default_machine
      assert req.worker_id == worker_id
      sub_req = SubscriptionRequest.from_str(req.sub_req)
      products = [find_product_from_graph(symbol, graph=self._graph) for symbol in req.symbols]
      feed_sub_req.add_products(products, sub_req)
    return feed_sub_req

  def on_book_reset(self, sub_req, book_builder, feed_cache_key):
    """Forward a book reset to the native adapter.

    When *sub_req* is given, asserts it matches the subscription request
    recorded in *feed_cache_key*.
    """
    if sub_req is not None:
      assert feed_cache_key.sub_req == sub_req.to_str()
    self._adapter.on_book_reset(feed_cache_key, book_builder.cc_builder)


class DataFrameBuilder:
  """Accumulates per-variable value columns and materializes a DataFrame.

  Each output variable gets one column, named after the variable's `name`
  (falling back to `instance_name`). aggregate() appends one batch of values
  per column; gen_dataframe() builds the final pandas DataFrame.
  """

  def __init__(self, output_vars):
    self._output_vars = output_vars
    self._output_table = []
    self._output_dict = {}
    for out_var in output_vars:
      out_var.instantiate()
      column = []
      self._output_table.append(column)
      self._output_dict[out_var.name or out_var.instance_name] = column

  def aggregate(self, value_aggregator):
    """Append one batch of values (one list per column) from the aggregator."""
    assert value_aggregator.ncols == len(self._output_vars), value_aggregator.ncols
    for col_idx, column in enumerate(self._output_table):
      column.extend(value_aggregator.get_values(col_idx))

  def gen_dataframe(self):
    """Return a DataFrame of everything aggregated so far."""
    return pd.DataFrame(self._output_dict)


def _remove_file(path, ignore_error=True):
  try:
    os.remove(tmpname)
  except Exception:
    if not ignore_error:
      raise


def _run_graph(graph_func,
               from_ts,
               to_ts,
               machine,
               feed_cache_dir,
               eval_updated_only,
               eval_callback,
               dump_pending_row,
               overwrite_outfile,
               outfile):
  """Build a graph via *graph_func*, replay fastfeed over [from_ts, to_ts),
  and dump the aggregator output to *outfile*.

  Intended to run inside a worker process (see
  run_from_fastfeed_multiprocess). The dump is written atomically: first to a
  dot-prefixed temp file in the same directory, then renamed into place. The
  run is skipped entirely when *outfile* already exists and overwrite_outfile
  is False (cache hit).
  """
  if not overwrite_outfile and os.path.exists(outfile):
    return

  try:
    # Initialize graph.
    graph = graph_mod.Graph()
    with graph_mod.as_default_graph(graph):
      aggregator = graph_func()

    adapter = FastfeedGraphAdapter(graph,
                                   machine,
                                   eval_updated_only=eval_updated_only,
                                   eval_callback=eval_callback)
    adapter.nullify_fastfeed_placeholders()

    # Run graph.
    ranges = split_time_range(from_ts, to_ts)
    prev_runner = None  # To prevent destroying last builder objects.
    for idx, (date, daily_from_ts, daily_to_ts) in enumerate(ranges):
      runner = native_fastfeed._FastfeedGraphRunner()
      runner.init(adapter.native_adapter, date.strftime('%Y%m%d'), feed_cache_dir)
      runner.run(daily_from_ts, daily_to_ts)
      # Keep the runner alive one extra iteration so its builder objects are
      # not destroyed while the next runner initializes.
      prev_runner = runner

    adapter.nullify_fastfeed_placeholders()
    prev_runner = None  # Release the last runner now that the replay is done.

    # Dump output.
    dirpath = os.path.dirname(outfile)
    os.makedirs(dirpath, exist_ok=True)
    filename = os.path.basename(outfile)
    # Hidden temp file in the same directory so os.rename is atomic (same fs).
    tmpfile = os.path.join(dirpath, '.' + filename)

    try:
      aggregator.dump(tmpfile, dump_pending_row=dump_pending_row)
      os.rename(tmpfile, outfile)
    except Exception:
      # Best-effort cleanup of the partial temp file before re-raising.
      _remove_file(tmpfile)
      raise

  except KeyboardInterrupt:
    # Swallow Ctrl-C in the worker; presumably shutdown is coordinated by the
    # parent process — TODO confirm.
    pass


def _get_run_cache_file(aggregator, from_ts, to_ts, cache_dir, extension='.hd5', **kwargs):
  """Return the deterministic cache path for one (aggregator, params, range) run.

  The directory name is a SHA-1 of the aggregator hash plus all extra keyword
  parameters; the file name encodes the time range (end shifted back by one
  microsecond so an exclusive end renders as the previous second).
  """
  cache_key = str({'aggregator_hash': aggregator.get_hash(), **kwargs})
  subdir = hashlib.sha1(cache_key.encode('utf-8')).hexdigest()

  start_dt = to_datetime(from_ts)
  end_dt = to_datetime(to_ts) - datetime.timedelta(microseconds=1)
  filename = '%s-%s%s' % (
      start_dt.strftime('%Y%m%d_%H%M%S'), end_dt.strftime('%Y%m%d_%H%M%S'), extension)
  return os.path.join(cache_dir, subdir, filename)


class HDataFrameBuilder_HD5:
  """Concatenates columns from multiple HDF5 run-cache files into a DataFrame.

  Files are expected to contain a '.colnames' dataset listing their columns,
  plus per-column value datasets and '<col>.null' mask datasets. Only columns
  accepted by *col_filter* are loaded and emitted.
  """

  def __init__(self, colnames, col_filter=None):
    self._colnames = list(colnames)
    self._outputs = {name: [] for name in self._colnames}
    if col_filter is None:
      col_filter = lambda _: True
    self._col_filter = col_filter

  def add_file(self, filepath):
    """Load the requested columns from one HDF5 file as masked arrays."""
    with h5py.File(filepath, 'r') as file:
      available = set(file['.colnames'])
      wanted = set(self._colnames)
      # The assert message shows (missing-from-file, extra-in-file).
      assert wanted <= available, (
          '%s\n%s' % ((wanted - available), available - (wanted)))
      for name in self._colnames:
        if self._col_filter(name):
          masked = np.ma.array(file[name], mask=file[name + '.null'])
          self._outputs[name].append(masked)

  def gen_dataframe(self):
    """Return one DataFrame with every loaded chunk concatenated per column."""
    columns = {}
    for name in self._colnames:
      if not self._col_filter(name):
        continue
      chunks = self._outputs[name]
      columns[name] = np.ma.concatenate(chunks) if chunks else []
    return pd.DataFrame(columns)


class _DebugExecutor:
  def submit(self, fn, *args, **kwargs):
    f = Future()
    try:
      res = fn(*args, **kwargs)
      f.set_result(res)
    except Exception as e:
      f.set_exception(e)
    return f

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc, exc_tb):
    pass


def run_from_fastfeed_multiprocess(graph_func,
                                   from_ts,
                                   to_ts,
                                   machine,
                                   *,
                                   eval_updated_only=True,
                                   dump_pending_row=False,
                                   use_run_cache=True,
                                   max_workers=4,
                                   result_dir=None,
                                   feed_cache_dir=None,
                                   run_cache_dir=None,
                                   gen_dataframe_output=True,
                                   inject_date_to_graph_func=False,
                                   date_filter=None,
                                   col_filter=None,
                                   duration_before=0,
                                   duration_after=0,
                                   debug_executor=False,
                                   eval_callback=None,
                                   verbose=False):
  """Run *graph_func* over [from_ts, to_ts) one day at a time in workers.

  Each daily sub-range is evaluated by _run_graph in its own process (or
  synchronously via _DebugExecutor when debug_executor is True), and the
  aggregator output is dumped to a content-addressed run-cache file that is
  reused on later calls unless use_run_cache is False.

  Args:
    graph_func: Callable building the graph and returning an aggregator;
      called with no arguments, or with the date when
      inject_date_to_graph_func is True.
    from_ts, to_ts: Overall time range, split into daily sub-ranges.
    machine: Machine name; a single-element list is also accepted.
    date_filter: Optional predicate on the date; days it rejects are skipped.
    col_filter: Optional predicate selecting columns for the output DataFrame.
    duration_before, duration_after: Padding applied around each daily range.
    result_dir: When given, symlinks to the cache files are created there.
    gen_dataframe_output: When True (default) return a concatenated DataFrame;
      otherwise return the list of (date, result file path) pairs.

  Returns:
    A pandas DataFrame when gen_dataframe_output, else a list of
    (date, file path) tuples.
  """
  feed_cache_dir = feed_cache_dir or DEFAULT_FEED_CACHE_DIR
  run_cache_dir = os.path.expanduser(run_cache_dir or '~/.prophet')
  # Bug fix: the caller-provided date_filter used to be unconditionally
  # overwritten with an accept-all lambda (and thus ignored); only default it
  # when absent.
  if date_filter is None:
    date_filter = lambda date: True
  duration_before = to_duration_int(duration_before)
  duration_after = to_duration_int(duration_after)

  if isinstance(machine, list):
    assert len(machine) == 1
    machine = machine[0]

  ranges = split_time_range(from_ts, to_ts)

  # Build the graph once up front to obtain the aggregator (for its column
  # names and hash); each worker rebuilds its own copy via graph_func.
  graph = graph_mod.Graph()
  with graph_mod.as_default_graph(graph):
    if inject_date_to_graph_func:
      aggregator = graph_func(ranges[0][0])
    else:
      aggregator = graph_func()

  if verbose:
    logging.info(aggregator.colnames)

  def get_run_cache_file(from_ts, to_ts):
    # Cache path keyed on the aggregator hash plus all run parameters.
    return _get_run_cache_file(aggregator,
                               from_ts,
                               to_ts,
                               run_cache_dir,
                               machine=machine,
                               eval_updated_only=eval_updated_only,
                               dump_pending_row=dump_pending_row)

  def init_executor():
    # _DebugExecutor runs jobs inline, keeping tracebacks/breakpoints usable.
    if not debug_executor:
      return ProcessPoolExecutor(max_workers=max_workers)
    else:
      return _DebugExecutor()

  futures = []
  with init_executor() as executor:
    for date, daily_from_ts, daily_to_ts in ranges:
      daily_from_ts -= duration_before
      daily_to_ts += duration_after

      if not date_filter(date):
        logging.info('Skipping %s' % date.strftime('%Y%m%d'))
        continue

      run_cache_file = get_run_cache_file(daily_from_ts, daily_to_ts)

      if verbose:
        logging.info('%s: %s', date.strftime('%Y%m%d'), run_cache_file)

      if inject_date_to_graph_func:
        graph_func_closure = functools.partial(graph_func, date)
      else:
        graph_func_closure = graph_func

      fut = executor.submit(_run_graph,
                            graph_func=graph_func_closure,
                            from_ts=daily_from_ts,
                            to_ts=daily_to_ts,
                            machine=machine,
                            feed_cache_dir=feed_cache_dir,
                            eval_updated_only=eval_updated_only,
                            eval_callback=eval_callback,
                            dump_pending_row=dump_pending_row,
                            overwrite_outfile=not use_run_cache,
                            outfile=run_cache_file)
      futures.append((date, run_cache_file, fut))

  run_cache_files = []
  for run_date, run_cache_file, fut in futures:
    try:
      fut.result()
      run_cache_files.append(run_cache_file)
    except Exception:
      # Bug fix: log the failing future's own date; previously this referenced
      # a stale `date` left over from the submission loop above.
      logging.exception('Error on %s', run_date)

  if result_dir is None:
    res_files = [(date, cache_file) for date, cache_file, _ in futures]
  else:
    if result_dir:
      os.makedirs(result_dir, exist_ok=True)
    res_files = []
    # Bug fix: the tuple's date was previously unpacked as `data`, so every
    # res_files entry reused whatever `date` happened to be in scope.
    for date, cache_file, _ in futures:
      res_file = os.path.join(result_dir, os.path.basename(cache_file))
      res_files.append((date, res_file))
      os.symlink(cache_file, res_file)

  if gen_dataframe_output:
    df_builder = HDataFrameBuilder_HD5(aggregator.colnames, col_filter=col_filter)
    for run_cache_file in run_cache_files:
      try:
        df_builder.add_file(run_cache_file)
      except OSError:
        # A corrupt/partial cache file should not sink the whole run.
        logging.exception('Error while reading file %s. Skip.', run_cache_file)
        continue
    return df_builder.gen_dataframe()

  return res_files
