import errno
import fcntl
import functools
import logging
import multiprocessing
import os
import queue
import select
import subprocess
import threading
import time

import repo_path

from results import stream

logger = logging.getLogger(__name__)

def node_cache(function):
  """Decorates function to memoize the results in self.cached_nodes.

  The first positional argument after self is the cache key; any further
  arguments are forwarded to the wrapped function but do not participate
  in the key. A cached value of None is returned on a hit just like any
  other value.
  """
  @functools.wraps(function)  # preserve name/docstring of the wrapped function
  def cache_function(self, key, *args):
    if key in self.cached_nodes:
      return self.cached_nodes[key]
    node = function(self, key, *args)
    self.cached_nodes[key] = node
    return node
  return cache_function


class ConsoleEventLogger(object):
  """Logs all build/test events to the console via a stream terminal.

  The terminal object must provide reset(), log(), flush() and a `lines`
  list for sticky output.
  """

  def __init__(self, term):
    self._term = term

  def log_command_start(self, name, command):
    """Logs when a command starts executing.

    Args:
      name: string, The target name.
      command: list of strings, the command being executed.
    """
    self._term.reset()
    self._term.lines.append(name)
    self._term.lines.append(str(command))
    self._term.flush()

  def log_command_success(self, name, run_output=None):
    """Logs when a command finishes successfully.

    Args:
      name: string, The target name.
      run_output: string, The output of the command.
    """
    self._term.reset()
    if run_output:
      # Yellow flags a success that produced output worth reading.
      self._term.log(stream.yellow(name))
      self._term.log(run_output)
    else:
      self._term.log(stream.green(name))
    self._term.flush()

  def log_missing_input(self, name, missing):
    """Logs when a command is missing input files.

    Args:
      name: string, The target name.
      missing: list of strings, The files missing.
    """
    self._term.reset()
    self._term.log(stream.red(
      'Missing input files to ' + name + ' : ' + repr(missing)))
    self._term.flush()

  def log_command_failure(self, name, command, run_output):
    """Logs when a command fails.

    Args:
      name: string, The target name.
      command: list of strings, the command that was executed.
      run_output: string, The output of the command.
    """
    self._term.reset()
    self._term.log(stream.red(name))
    self._term.log('command is ' + str(command))
    if run_output:
      self._term.log(run_output)
    self._term.flush()

  def log_test_success(self, test_name, elapsed_time):
    """Logs when a test succeeds.

    Args:
      test_name: string, The test target name.
      elapsed_time: double, The time the test took to run.
    """
    self._term.reset()
    self._term.log(stream.green(
      '  Test Passed: %s [%.3fs]' % (test_name, elapsed_time)))
    self._term.flush()

  def log_test_failure(self, test_name, test_exe, run_output):
    """Logs when a test fails.

    Args:
      test_name: string, The test target name.
      test_exe: string, The test target executable.
      run_output: string, The output from the test.
    """
    self._term.reset()
    self._term.log(stream.red('  Test %s failed' % test_name))
    self._term.log('Failed while running %s' % test_exe)
    if run_output:
      self._term.log(run_output)
    else:
      self._term.log(stream.yellow('Test had no output'))
    self._term.flush()

  def log_aborting_build(self):
    """Logs when the build is aborted."""
    self._term.log(stream.red('Aborting build'))

  def log_build_success(self):
    """Logs when the build succeeds."""
    self._term.reset()
    self._term.log("Build Success")
    self._term.flush()

  def log_build_failure(self):
    """Logs when the build fails."""
    self._term.reset()
    self._term.log("Build Failure")
    self._term.flush()


class QuietConsoleEventLogger(ConsoleEventLogger):
  """Logs all events to the console, with less noise."""

  def __init__(self, term):
    super().__init__(term)

  def _announce(self, message):
    # Shared emission path for the condensed messages: a sticky green
    # two-line banner on a TTY, a plain log line otherwise.
    self._term.reset()
    if self._term.is_tty:
      self._term.lines.append(stream.green(message))
      self._term.lines.append('')
    else:
      self._term.log(message)
    self._term.flush()

  def log_command_start(self, name, command):
    """Logs a condensed one-line notice that a build command started."""
    self._announce('Started building ' + name)

  def log_command_success(self, name, run_output=None):
    """Logs a condensed one-line notice that a build command finished.

    The command's output is intentionally suppressed in quiet mode.
    """
    self._announce('Finished building ' + name)

class EvalGraphNode(object):
  """Represents a node in an EvalGraph.

  Properties:
    graph: The EvalGraph this is part of.
    name: The user-visible name for this node.
    work: The dep_graph.cache_view.GenRule corresponding to this node or None.
    exe: The executable (argv list) corresponding to this node, or None.
    _start_time: None, or the time that the node started running at.
    dependencies: The set of EvalGraphNodes we depend on.
    finished_dependencies: The subset of dependencies that have finished.
    affects: EvalGraphNodes that depend on us.
    added: The dot-graph handle once added to the dot graph, or False.
    __finished: None if the command hasn't finished, True if the command
                succeeded, or False if the command failed.
    __job: The subprocess.Popen of our currently running build command or None.
    __run_output: Accumulated chunks of bytes read from __job's stdout.
  """
  def __init__(self, graph, name, work=None, exe=None):
    self.graph = graph
    self.name = name
    self.work = work
    self.exe = exe
    self._start_time = None
    self.dependencies = set()
    self.finished_dependencies = set()
    self.affects = set()
    self.added = False
    self.__finished = None
    self.__job = None
    self.__run_output = []

    # With no dependencies yet, the node is immediately runnable; dep_on()
    # pulls it back out of the ready set if a dependency is added.
    self.graph.ready.add(self)

  def finished(self):
    """Returns whether the command succeeded. Only valid after it ran."""
    assert self.__finished is not None
    return self.__finished

  def run_and_finished(self):
    """Returns True iff the command has run and succeeded."""
    return self.__finished is not None and self.__finished

  def dep_on(self, other):
    """Adds a dependency on other.

    Args:
      other: The EvalGraphNode to depend on, or None for a no-op.
    """
    if not other:
      return
    other.affects.add(self)
    self.dependencies.add(other)
    # Having at least one (unfinished) dependency means we are not ready.
    if self in self.graph.ready:
      self.graph.ready.remove(self)

  def report_dep_finished(self, node):
    """Report that a node we depend on is done.

    Args:
      node: The EvalGraphNode that is reporting it is done.
    """
    self.finished_dependencies.add(node)
    if len(self.finished_dependencies) == len(self.dependencies):
      self.graph.ready.add(self)

  def add_to_dot(self, dot_graph):
    """Adds this node and, recursively, its dependencies to dot_graph.

    Returns the dot-graph handle for this node."""
    if not self.added:
      color = "green" if self.work or self.exe else "blue"
      self.added = dot_graph.add_pt(self.name, color=color)
      for dep in self.dependencies:
        dot_graph.line(dep.add_to_dot(dot_graph), self.added)
    return self.added

  def remove(self):
    """Removes self from the graph, splicing dependents onto dependencies."""
    for affected in self.affects:
      affected.dependencies.remove(self)
      for dep in self.dependencies:
        dep.affects.add(affected)
        affected.dependencies.add(dep)
    for dep in self.dependencies:
      dep.affects.remove(self)

  def fileno(self):
    """Only valid if we are currently running our command.

    Returns the file descriptor of the command's stdout (lets select()
    wait on the node directly)."""
    assert self.__job is not None
    return self.__job.stdout.fileno()

  @property
  def pid(self):
    """Only valid if we are currently running our command.

    Returns the PID of the running command."""
    assert self.__job is not None
    return self.__job.pid

  def __run_finished(self, event_logger, success):
    """Records the result, notifies dependents, and logs the outcome.

    Args:
      event_logger: The ConsoleEventLogger to write output to.
      success: bool, whether the command succeeded.
    """
    if self.work and success:
      self.work.set_valid()
      for fset in self.work.inp.getFsets():
        self.graph.add_back_fsets.add(fset)
      for fset in self.work.wipe.getFsets():
        self.graph.add_back_fsets.add(fset)
    self.__finished = success

    real_command = self.work and self.work.get_cmd() is not None
    # errors='replace' keeps a command that emits non-UTF-8 bytes from
    # killing the build with a UnicodeDecodeError.
    run_output = b''.join(self.__run_output).decode(
      'utf-8', errors='replace').rstrip('\r\n')
    if success:
      for affected in self.affects:
        affected.report_dep_finished(self)

    if real_command:
      if success:
        event_logger.log_command_success(self.name, run_output)
      else:
        # log_command_failure expects (name, command, run_output); these
        # arguments were previously passed swapped, printing the output
        # where the command belongs and vice versa.
        event_logger.log_command_failure(
          self.name, self.work.get_cmd(), run_output)
    elif self.exe:
      if success:
        event_logger.log_test_success(self.name, time.time() - self._start_time)
      else:
        event_logger.log_test_failure(self.name, self.exe, run_output)

  def run_finished(self, event_logger, status):
    """Called to indicate that the child process is finished running.

    Args:
      event_logger: The ConsoleEventLogger to write output to.
      status: The exit status as returned from os.wait() et al.

    Returns:
      True if this result should stop the build.
    """
    assert self in self.graph.running
    self.graph.running.remove(self)
    # Drain any output that arrived after the last select() wakeup.
    self.read_run_output()
    self.__run_finished(event_logger, status == 0)
    self.__job.stdout.close()
    # A failing test never aborts the build; a failing build command does.
    return False if self.exe else status != 0

  def read_run_output(self):
    """Called whenever the child process has readable output."""
    # stdout is in non-blocking mode, so read() returns whatever is
    # currently buffered (possibly None/empty).
    read = self.__job.stdout.read()
    if read:
      self.__run_output.append(read)

  def start_run(self, event_logger):
    """Starts the command running.

    Args:
      event_logger: The ConsoleEventLogger to write output to.

    Returns:
      False if the build should abort (missing input files), True otherwise.
    """
    assert self.__finished is None
    assert self.__job is None
    if self.work and self.work.get_cmd() is not None or self.exe:
      if self.work:
        # Refuse to run a rule whose declared input files are absent.
        missing = set()
        for fset in self.work.inp.getFsets():
          for fname in fset.files:
            if not os.path.exists(fname):
              missing.add(fname)
        if missing:
          event_logger.log_missing_input(self.name, list(missing))
          return False

      cmd = self.exe or self.work.get_cmd()
      self._start_time = time.time()

      event_logger.log_command_start(self.name, cmd)

      if self.work:
        # Make sure every output directory exists before the command runs.
        for fset in self.work.out.getFsets():
          for fname in fset.files:
            if isinstance(fname, str):
              repo_path.ensure_path(os.path.dirname(fname))
      if cmd:
        env = self.work.get_env() if self.work else None
        logger.info('Running command ' + str(cmd))
        self.__job = subprocess.Popen(cmd,
                                      env=env,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.STDOUT)
        # Non-blocking stdout lets the scheduler multiplex many children
        # with select() instead of one reader thread per child.
        flags = fcntl.fcntl(self.__job.stdout, fcntl.F_GETFL)
        fcntl.fcntl(self.__job.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self.graph.running.add(self)
      else:
        # Nothing to execute; record an immediate success.
        self.__run_finished(event_logger, True)
    else:
      self.__run_finished(event_logger, True)
    return True


class EvalGraph(object):
  """Represents a graph of nodes to evaluate.

  Properties:
    cached_nodes: A cache of nodes which have been added (see node_cache).
    ready: The set of nodes which are currently ready to run.
    running: The set of nodes which are currently running.
    final: The node representing our end goal.
    add_back_files: The cached result of get_add_back.
    add_back_fsets: dep_graph.cache_view.FileSets we need to add to
                    add_back_files.
  """
  def __init__(self):
    self.cached_nodes = dict()
    self.ready = set()
    self.running = set()
    # Sentinel node every top-level target feeds into; the whole build
    # succeeded iff this node ran and finished.
    self.final = EvalGraphNode(self, "final")
    self.add_back_files = set()
    self.add_back_fsets = set()

  def get_add_back(self):
    """Updates and returns the set of all files we need to add back.

    Does a depth-first search of all FileSets we have built since last time this
    was called and adds all new input files.

    Returns:
      The set of all input files we (transitively) depend on.
    """
    stk = list(self.add_back_fsets)
    visited = set()
    while len(stk):
      node = stk.pop()
      # NOTE(review): a node may be pushed more than once before it is
      # marked visited here; re-processing is harmless since
      # add_back_files is a set, but it does redundant work.
      visited.add(node)
      if node.is_input() or node.is_wipe():
        for fname in node.files:
          self.add_back_files.add(fname)
      for fset in node.dependencies:
        if fset not in visited:
          stk.append(fset)
    # Results accumulate in add_back_files across calls; only the fsets
    # built since the previous call need to be walked next time.
    self.add_back_fsets = set()
    return self.add_back_files

  @node_cache
  def add_fset(self, fset):
    """Adds an fset to the graph (memoized on fset by @node_cache).

    Args:
      fset: The dep_graph.cache_view.FileSet to add.

    Returns the node corresponding to the FileSet."""
    node = EvalGraphNode(self, str(fset.key))
    for ofset in fset.dependencies:
      node.dep_on(self.add_fset(ofset))
    # Output fsets with actual files also depend on the rule producing them.
    if fset.is_output() and fset.files:
      node.dep_on(self.add_genrule(fset.gen_rule))
    return node

  @node_cache
  def add_test(self, test_fset, name):
    """Adds a test to the graph (memoized on test_fset by @node_cache).

    Args:
      test_fset: The dep_graph.cache_view.FileSet corresponding to the test
                 binary(s).
      name: string, The rule name
    """
    # One node per test binary; each runs after the fset is built and gates
    # the final node.
    for test in test_fset.file_names():
      node = EvalGraphNode(self, name, exe=[x for x in test])
      node.dep_on(self.add_fset(test_fset))
      self.final.dep_on(node)

  @node_cache
  def add_genrule(self, rule):
    """Adds a rule to the graph (memoized on rule by @node_cache).

    Args:
      rule: The dep_graph.cache_view.GenRule to add to the graph.

    Returns the node corresponding to the GenRule or None if it is already
    valid."""
    if rule.valid:
      # Nothing to build; callers treat None as "no dependency needed".
      return None
    node = EvalGraphNode(self, str(rule), work=rule)
    for fset in rule.inp.getFsets():
      node.dep_on(self.add_fset(fset))
    return node

  @node_cache
  def add_final_genrule(self, rule):
    """Adds a rule to the graph which is a top-level dependency.

    Args:
      rule: The dep_graph.cache_view.GenRule to add.

    Returns the node corresponding to the rule."""
    node = self.add_genrule(rule)
    self.final.dep_on(node)
    return node

  def run_cmds(self):
    """Runs all of the build rules.

    Returns True if everything succeeded or False if it didn't."""
    cpu_count = multiprocessing.cpu_count()

    with stream.ModifiableStreamTerm() as term:
      event_logger = QuietConsoleEventLogger(term)

      # A dedicated thread reaps children with os.wait() and funnels their
      # (pid, status) results into a queue for the scheduling loop below.
      done_children = queue.Queue()
      wait_for_children_done = threading.Event()
      def wait_for_children():
        while not wait_for_children_done.is_set():
          try:
            result = os.wait()
            done_children.put_nowait(result)
          except OSError as e:
            # ECHILD just means there are currently no children to reap.
            # NOTE(review): this makes the thread spin while no children
            # exist — presumably acceptable here, but worth confirming.
            if e.errno != errno.ECHILD:
              raise e
      wait_for_children_thread = threading.Thread(target=wait_for_children)
      wait_for_children_thread.daemon = True
      wait_for_children_thread.start()
      stop = False
      try:
        while self.running or self.ready:
          assert not self.final.run_and_finished()
          # Keep at most cpu_count commands in flight; stop launching new
          # ones once an abort has been requested.
          while not stop and len(self.running) < cpu_count and self.ready:
            if not self.ready.pop().start_run(event_logger):
              event_logger.log_aborting_build()
              stop = True
          if not self.running:
            assert stop or not self.ready
            break
          # EvalGraphNode implements fileno(), so select() can wait
          # directly on the set of running nodes for readable output.
          readable, _, _ = select.select(self.running,
                                         tuple(),
                                         tuple())
          try:
            # Drain every already-reaped child before reading output.
            while True:
              done = done_children.get_nowait()
              pid, status = done
              found = False
              for child in self.running:
                if child.pid == pid:
                  assert not found
                  # run_finished removes child from self.running; the
                  # immediate break below avoids continuing iteration over
                  # the mutated set.
                  if child.run_finished(event_logger, status):
                    if not stop:
                      event_logger.log_aborting_build()
                      stop = True
                  # run_finished closed the child's stdout; don't read it.
                  if child in readable:
                    readable.remove(child)
                  found = True
                  break
              assert found
          except queue.Empty:
            pass
          for to_read in readable:
            to_read.read_run_output()
        if self.final.run_and_finished():
          event_logger.log_build_success()
        else:
          event_logger.log_build_failure()
        return self.final.run_and_finished()
      finally:
        wait_for_children_done.set()
        # NOTE(review): join() can block while os.wait() is still blocked
        # on a live child; the thread is a daemon, so confirm this
        # shutdown ordering is intended.
        wait_for_children_thread.join()
