# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import contextlib
import imp
import inspect
import logging
import multiprocessing
import os
import Queue
import signal
import sys
import traceback

from cStringIO import StringIO

from expect_tests.type_definitions import (
    Test, UnknownError, TestError, NoMatchingTestsError, MultiTest,
    Result, ResultStageAbort)

from expect_tests.unittest_helper import _is_unittest, UnittestTestCase
from expect_tests import util
from expect_tests import listing


@contextlib.contextmanager
def use_chdir(path):
  """Temporarily make `path` the current working directory.

  The previous working directory is restored on exit, even when the body
  raises an exception.
  """
  saved_cwd = os.getcwd()
  os.chdir(path)
  try:
    yield
  finally:
    os.chdir(saved_cwd)


class ResetableStringIO(object):
  """A StringIO-like stream whose buffer can be emptied in place.

  Every attribute other than `reset` is forwarded to an internal StringIO
  instance, so the usual stream API (write, getvalue, ...) works directly
  on this object.
  """

  def __init__(self):
    self.reset()

  def reset(self):
    """Discard everything written so far by swapping in a fresh buffer."""
    self._stream = StringIO()

  def __getattr__(self, key):
    # Only reached for attributes not found on this object: delegate to
    # the underlying stream.
    return getattr(self._stream, key)


def get_package_path(package_name, path):
  """Return the filesystem path of the package called `package_name`.

  Args:
    package_name (str): name of the package, as expected by import.
    path (str or None): if None, search for the package along sys.path
      (via imp.find_module). Otherwise, look for a direct subdirectory
      of `path` named `package_name`.

  Returns:
    package_path (str or None): normalized path to the package directory,
      or None if what was found is not a package (no __init__.py, or a
      built-in module which has no path at all).

  Raises:
    ValueError: if `path` is provided and contains no entry named
      `package_name`.
    ImportError: propagated from imp.find_module when `path` is None and
      no module called `package_name` can be found.
  """
  if path is None:
    _, package_path, _ = imp.find_module(package_name)
    # Built-in modules have no file: find_module returns None as the
    # path. They cannot be packages, so report "no package found" instead
    # of crashing in os.path.join below.
    if package_path is None:
      return None
  else:
    package_path = os.path.join(path, package_name)
    if not os.path.exists(package_path):
      raise ValueError('File not found: %s' % package_path)
  ispkg = os.path.isfile(os.path.join(package_path, '__init__.py'))
  return os.path.normpath(package_path) if ispkg else None


def is_test_file(filename):
  """Tell whether `filename` is supposed to contain tests.

  Args:
  filename (str): path to a python file.

  Returns:
  is_test (boolean): True iff the path ends in '_test.py'.
  """
  suffix = '_test.py'
  return filename[-len(suffix):] == suffix


def walk_package(package_name, path, subpath=None):
  """Return all test files inside a single package.

  In all cases, this function returns the full package name of files ending
  in '_test.py' found either under the package called <package_name>.
  Example: 'test_package.foo_test' for 'test_package/foo_test.py'.

  Provided that <path> is in sys.path, calling __import__ with one of the
  strings returned by this function works.

  If a config file is present somewhere in the search hierarchy,
  it is interpreted as a list of subdirectories to ignore. This is the way to
  make this function ignore some subpackages.

  This function has several behaviors depending on the arguments:
  - if path is None: search for package_name under any path in sys.path.
  - if path is not None: search for a package called package_name directly
    under <path> (sys.path is not used).

  It is not necessary to change sys.path for the present function to work (it
  does not actually import anything).

  Args:
  package_name (str): name of the package, as expected by import.
  path (str): path containing the above module (optional)
  subpath (str, optional): path inside the package, pointing to a subpackage.
     This is used to restrict the listing to part of the package.

  Returns:
  test_modules (list of str): name of modules containing tests. Each element is
     a period-separated string ending with '_test',
     e.g. shiny_package.subpackage.feature_test

  Raises:
  ValueError: if `subpath` is provided but does not exist in the package.

  Example:
  modules = walk_package('shiny_package', 'some/directory')
  sys.path.insert(0, 'some/directory')
  __import__(modules[0])

  the first line assumes that the directory 'some/directory' is in the
    current working directory.
  """
  assert package_name, 'walk_package needs a package_name.'

  test_modules = []
  package_path = get_package_path(package_name, path)
  assert package_path, 'no package found.'

  # base_path is the directory that contains the package itself; module
  # names below are computed relative to it, so they are importable once
  # base_path is on sys.path.
  base_path = os.path.split(package_path.rstrip(os.path.sep))[0]
  assert package_path.startswith(base_path)

  # Real paths of directories already visited. Used to break cycles,
  # since os.walk is invoked with followlinks=True below.
  explored = set()

  if subpath:
    start_path = os.path.join(package_path, subpath)
    if not os.path.exists(start_path):
      raise ValueError('Provided subpath does not exist: %s' % start_path)
  else:
    start_path = package_path

  for dirpath, dirnames, filenames in os.walk(start_path, followlinks=True):
    # Keep only submodules not blacklisted, break symlink cycles.
    # Assigning to dirnames[:] (in place) tells os.walk not to descend
    # into the pruned directories. A kept directory must: not be listed
    # in the per-directory config blacklist, be a package (contain an
    # __init__.py), and not have been visited already via a symlink.
    blacklist = listing.get_config(dirpath)
    dirnames[:] = [d for d in dirnames
                   if d not in blacklist and
                   os.path.isfile(os.path.join(dirpath, d, '__init__.py')) and
                   os.path.realpath(os.path.join(dirpath, d)) not in explored]
    realpaths = [os.path.realpath(os.path.join(dirpath, d)) for d in dirnames]
    explored.update(realpaths)

    assert dirpath.startswith(start_path)
    # Turn 'base/pkg/sub' into ['pkg', 'sub'], then append each test
    # file's module name to build dotted module paths.
    base_module_name = os.path.relpath(dirpath, base_path).split(os.path.sep)
    test_modules.extend(['.'.join(base_module_name
                                  + [inspect.getmodulename(filename)])
                         for filename in filenames
                         if is_test_file(filename)])

  return test_modules


def load_module(modname):
  """Import the module named `modname` and return the leaf module object.

  Uses __import__ rather than pkgutil's PEP302-style find_module/load_module
  because those just don't work here. Since __import__ returns the topmost
  parent package of a dotted name, we descend attribute by attribute down
  to the actual submodule; at least this behaves deterministically.
  """
  module = __import__(modname)
  _, _, remainder = modname.partition('.')
  if remainder:
    for attr in remainder.split('.'):
      module = getattr(module, attr)
  return module


def get_test_gens_package(testing_context, subpath=None):
  """Given a testing context, return list of generators of *Test instances.

  See UnittestTestCase for possible return values.

  This function loads modules, thus no two conflicting packages (like
  appengine) should be loaded at the same time: use separate processes
  for that.

  Args:
    testing_context (PackageTestingContext): what to test.
    subpath (str): relative path in the tested package to restrict the search
      to. Relative to
      os.path.join(testing_context.cwd, testing_context.package_name)

  Returns:
    gens_list (list of generator of tests): tests are instances of Test
      or MultiTest.
  """
  # TODO(pgervais) add filtering on test names (use testing_context.filters)
  gens = []
  modnames = walk_package(testing_context.package_name,
                          testing_context.cwd, subpath=subpath)
  for modname in modnames:
    # Import relative to the package's own directory.
    with use_chdir(testing_context.cwd):
      module = load_module(modname)
    for value in module.__dict__.values():
      if util.is_test_generator(value):
        gens.append(value)
      elif _is_unittest(value):
        gens.append(UnittestTestCase(value))
  return gens


def gen_loop_process(testing_contexts, test_queue, result_queue, opts,
                     kill_switch, cover_ctx):
  """Generate `Test`s from the testing contexts and feed them downstream.

  Tests go into |test_queue| (or straight into |result_queue| when the
  handler skips the run stage). Non-Test instances produced by a generator
  are reported as `UnknownError` objects on |result_queue|.

  Args:
    testing_contexts (list of PackageTestingContext): describe tests to
      process.
    test_queue (multiprocessing.Queue):
    result_queue (multiprocessing.Queue):
    opts (argparse.Namespace):
    kill_switch (multiprocessing.Event):
    cover_ctx (cover.CoverageContext().create_subprocess_context)
  """

  # Unique marker to detect generator exhaustion without catching
  # StopIteration (see next(gen_inst, SENTINEL) below).
  SENTINEL = object()

  def generate_tests():
    # Tracks whether any test survived filtering, across all contexts, so
    # we can report NoMatchingTestsError at the end.
    seen_tests = False
    try:
      for testing_context in testing_contexts:
        for subpath, matcher in testing_context.itermatchers():
          # Expectation paths seen so far, used to flag duplicates.
          paths_seen = set()

          # Module loading runs under the coverage context too.
          with cover_ctx:
            gens = get_test_gens_package(testing_context, subpath=subpath)

          for gen in gens:
            # Narrow coverage to the files this generator declares.
            gen_cover_ctx = cover_ctx.update(include=util.get_cover_list(gen))

            with gen_cover_ctx:
              gen_inst = gen()

            while not kill_switch.is_set():
              with gen_cover_ctx:
                root_test = next(gen_inst, SENTINEL)

              if root_test is SENTINEL:
                break

              if kill_switch.is_set():
                break

              ok_tests = []

              # Normalize: treat a plain Test as a one-element MultiTest.
              if isinstance(root_test, MultiTest):
                subtests = root_test.tests
              else:
                subtests = [root_test]

              for subtest in subtests:
                if not isinstance(subtest, Test):
                  result_queue.put_nowait(
                      UnknownError(
                          'Got non-[Multi]Test isinstance from generator: %r.'
                          % subtest))
                  continue

                test_path = subtest.expect_path()
                if test_path is not None and test_path in paths_seen:
                  result_queue.put_nowait(
                      TestError(subtest, 'Duplicate expectation path.'))
                else:
                  if test_path is not None:
                    paths_seen.add(test_path)
                  name = subtest.name
                  # if not neg_matcher.match(name) and matcher.match(name):
                  if matcher.match(name):
                    ok_tests.append(subtest)

              # Yield only the subtests that matched, keeping the
              # MultiTest wrapper if there was one.
              if ok_tests:
                seen_tests = True
                yield root_test.restrict(ok_tests)

      if not seen_tests:
        result_queue.put_nowait(NoMatchingTestsError())

    except KeyboardInterrupt:
      pass

  # Handlers that skip the run loop (e.g. listing) consume tests directly
  # as results.
  next_stage = (result_queue if opts.handler.SKIP_RUNLOOP else test_queue)
  opts.handler.gen_stage_loop(opts, generate_tests(), next_stage.put_nowait,
                              result_queue.put_nowait)


def run_loop_process(test_queue, result_queue, opts,
                     kill_switch, test_gen_finished,
                     cover_ctx, cwd):
  """Consume ``Test`` instances from ``test_queue``, run them, and yield the
  results into opts.handler.run_stage_loop().

  Generates coverage data as a side-effect.

  Args:
    test_queue (multiprocessing.Queue()):
    result_queue (multiprocessing.Queue()):
    opts (argparse.Namespace):
    kill_switch (multiprocessing.Event):
    test_gen_finished (multiprocessing.Event): set once the generation
      stage has produced everything; lets this loop terminate when the
      queue drains.
    cover_ctx (cover.CoverageContext().create_subprocess_context()):
    cwd (str): directory to chdir into while each test runs.
  """
  # Capture root-logger output per test; reset before each run so
  # get_log_lines only reports the current test's logging.
  logstream = ResetableStringIO()
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)
  shandler = logging.StreamHandler(logstream)
  shandler.setFormatter(
      logging.Formatter('%(levelname)s: %(message)s'))
  logger.addHandler(shandler)

  # Should only happen in 'debug' mode, useless/dangerous otherwise
  if opts.handler.stdin:
    sys.stdin = opts.handler.stdin

  # Don't let tests write directly to stdout, stderr.
  sys.stdout.close()
  sys.stdout = StringIO()
  sys.stderr.close()
  sys.stderr = StringIO()

  def get_log_lines():
    """Get test log output, stdout, and stderr lines as a list of strings."""
    lines = []
    for name, source in (
        ('LOG', logstream), ('STDOUT', sys.stdout), ('STDERR', sys.stderr)):
      lines.append('%s:' % name)
      raw_output = source.getvalue()
      if raw_output:
        lines.extend(raw_output.splitlines())
      else:
        lines.append('(empty)')
      lines.append('')
    return lines

  # Unique marker meaning "error already reported on result_queue; do not
  # yield a result for this subtest".
  SKIP = object()
  def process_test(run_ctx, subtest):
    """run_ctx: context manager
    subtest: Test or subclass of

    Returns the test's Result, or SKIP when an error was pushed to
    result_queue instead.
    """
    logstream.reset()
    try:
      with cover_ctx.update(include=subtest.coverage_includes()):
        with run_ctx(subtest):
          with use_chdir(cwd):
            subresult = subtest.run()

    except Exception:
      result_queue.put_nowait(
        TestError(subtest, traceback.format_exc(), get_log_lines()))
      return SKIP
    if isinstance(subresult, TestError):
      result_queue.put_nowait(subresult)
      return SKIP
    elif not isinstance(subresult, Result):
      result_queue.put_nowait(
          TestError(
              subtest,
              'Got non-Result instance from test: %r' % subresult))
      return SKIP
    return subresult

  def generate_tests_results(run_ctx):
    try:
      # Poll the queue until the kill switch fires, or the queue drains
      # after the generation stage has finished.
      while not kill_switch.is_set():
        try:
          test = test_queue.get(timeout=0.1)
        except Queue.Empty:
          if test_gen_finished.is_set():
            break
          continue

        try:
          for subtest, subresult in test.process(
              func=lambda x: process_test(run_ctx, x)):
            if subresult is not SKIP:
              yield subtest, subresult, get_log_lines()
        except Exception:
          result_queue.put_nowait(
              TestError(test, traceback.format_exc(), get_log_lines()))
    except KeyboardInterrupt:
      pass

  opts.handler.run_stage_loop(
    opts,
    generate_tests_results(opts.handler.run_stage_loop_ctx),
    result_queue.put_nowait)


def result_loop_single_context(cover_ctx, kill_switch, result_queue, opts,
                               processing_context):
  """Run the specified operation for a single processing context.

  Runs the optional pretest hook found in processing_context.cwd, adds that
  directory to sys.path, then runs the generation stage (gen_loop_process)
  and -- unless the handler skips the run loop -- opts.jobs worker
  processes executing run_loop_process.

  This function is meant to be run as a dedicated process. Calling it twice
  in the same process is not supported (it mutates sys.path and may execute
  a pretest file).

  Args:
    cover_ctx:
    kill_switch (multiprocessing.Event):
    result_queue (multiprocessing.Queue):
    opts: output of argparse.ArgumentParser.parse_args (see main.py)
    processing_context (ProcessingContext): the task to perform.
  """
  assert os.path.isabs(processing_context.cwd)
  # pretest_filename can officially be accessed by .expect_tests_pretest.py to
  # get to this file's location. So no renaming!
  pretest_filename = os.path.join(processing_context.cwd,
                                  '.expect_tests_pretest.py')
  if os.path.isfile(pretest_filename):
    # NOTE: executes in this function's scope (Python 2 execfile).
    execfile(pretest_filename)

  sys.path.insert(0, processing_context.cwd)

  # This flag is set when test generation has finished.
  test_gen_finished = multiprocessing.Event()
  test_queue = multiprocessing.Queue()

  test_gen_args = (
      processing_context.testing_contexts, test_queue, result_queue, opts,
      kill_switch, cover_ctx
  )

  procs = []
  if opts.handler.SKIP_RUNLOOP:
    # No workers needed: tests go straight to the result queue.
    gen_loop_process(*test_gen_args)
  else:
    procs = [
        multiprocessing.Process(
            target=run_loop_process,
            args=(test_queue, result_queue, opts,
                  kill_switch, test_gen_finished, cover_ctx,
                  processing_context.cwd),
            name='run_loop_process %d' % job_num)
        for job_num in xrange(opts.jobs)
    ]

    for p in procs:
      p.daemon = True
      p.start()

    # Generation runs in this process while the workers consume.
    gen_loop_process(*test_gen_args)
    # Signal all run_loop_process that they can exit.
    test_gen_finished.set()

  for p in procs:
    p.join()


def result_loop(cover_ctx, opts):
  """Run the specified operation for all processing contexts.

  Directories and packages to process are defined in opts.directory and
  opts.package.

  The operation to perform (list/test/debug/train) is defined by opts.handler.

  Returns:
    (error, aborted) pair of booleans: whether the result stage reported an
    error (or the result queue was not fully drained), and whether the kill
    switch (SIGINT/SIGTERM) fired.
  """
  processing_contexts = listing.get_runtime_contexts(opts.test_glob)

  def ensure_echo_on():
    """Restore echo on in the terminal.

    This is useful when killing a pdb session with C-c.
    """
    try:
      import termios
    except ImportError:
      termios = None
    if termios:
      fd = sys.stdin.fileno()
      try:
        attr_list = termios.tcgetattr(fd)
      except termios.error:
        pass
      else:
        if not attr_list[3] & termios.ECHO:
          attr_list[3] |= termios.ECHO
          # TODO(pgervais): does not work on windows. Fix.
          termios.tcsetattr(fd, termios.TCSANOW, attr_list)

  kill_switch = multiprocessing.Event()
  def handle_killswitch(*_):
    kill_switch.set()
    ensure_echo_on()
    # Reset the signal to DFL so that double ctrl-C kills us for sure.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)

  signal.signal(signal.SIGINT, handle_killswitch)
  signal.signal(signal.SIGTERM, handle_killswitch)

  result_queue = multiprocessing.Queue()

  procs = [
    multiprocessing.Process(
      target=result_loop_single_context,
      args=(cover_ctx, kill_switch, result_queue, opts, c)
      )
    for c in processing_contexts
    ]

  error = False

  try:
    def generate_objects(procs):
      # Processes are started and drained one at a time, in order.
      for p in procs:
        p.start()
        while not kill_switch.is_set():
          try:
            yield result_queue.get(timeout=0.1)
          except Queue.Empty:
            pass

          if not p.is_alive():
            break

        if kill_switch.is_set():
          p.terminate()
        p.join()

        # Past this point, the process is finished
        # Get everything still in the queue. Since nothing is going to be
        # adding stuff to the queue, go as fast as possible.
        while not kill_switch.is_set():
          try:
            yield result_queue.get_nowait()
          except Queue.Empty:
            break

      if kill_switch.is_set():
        raise ResultStageAbort()

    if procs:
      error = opts.handler.result_stage_loop(opts, generate_objects(procs))
  except ResultStageAbort:
    pass

  # Anything left in the queue after a clean finish means results were
  # produced but not consumed: treat that as an error.
  if not kill_switch.is_set() and not result_queue.empty():
    error = True

  return error, kill_switch.is_set()
