#!/usr/bin/python -S
"""
testy.py

Simple test framework that mirrors the interface of Python's built-in unittest,
without all the weirdness.  It's meant to provide an easy upgrade path.

A key feature of testy is running the same tests against different
implementations of the same API, using different backends or 'verifiers'.

unittest annoyances:
  - Inheritance hierarchies for TestCase objects are an antipattern.
  - There are lots of unrelated methods bundled on the TestCase objects.
  - TestCase objects can't be instantiated easily by the user, which makes it
    hard/awkward to write tests with multiple backends.

DETAILS:

  Test: A group of test methods, which have their own setUp/tearDown
      setUpOnce/tearDownOnce pairs.  The test methods share the state of the
      Test instance.
  Verifier: All Test instances have a verifier member.  It contains the various
      "verification methods" or assertions.  It's intended that you design an
      API for verifiers that's a "domain specific language" for your test
      problem.  Then the details can be specified by the individual verifier
      instances (see jsontemplate: verify.Expansion, verify.CompilationError,
      verify.EvaluationError)
  TestVisitor: A test runner should implement this interface.  This interface
      defines the various events that happen when searching over a module/source
      tree for Test *instances* to run.

Test *classes*, Verifier *classes*, and test methods may all have labels.  These
are used for many flexible kinds of test filtering and declarative constraints.
"""

__author__ = 'Andy Chu'


# TODO: dis, timeit, and profile aren't used very often.  Could lazily import
# these.

import dis
import inspect
import os
import re
import sys
import timeit
import traceback
import unittest
try:
  import json
except ImportError:
  import simplejson as json

try:
  import cProfile as profile  # Python 2.5
  profile_module_warning = False
except ImportError:
  import profile  # Python 2.4
  profile_module_warning = True

import cmdapp
import log
import params
import testdata
# For assertMultilineEqual.  TODO: Might want to just copy that in here.
import testutils


class Error(Exception):
  """Base class for all exceptions raised by this module."""
  pass


# NOTE(review): the class name is missing the first 'r' of "Prerequisite";
# kept as-is since callers may catch it by this name.
class TestPrequisiteMissing(Error):
  """You can't run the tests because a prerequisite is missing.

  This exception is not caught by the test runner -- it's treated as a hard
  error.
  """


class ClassDef(object):
  """Records constructor arguments for deferred instantiation.

  Use in place of calling a class constructor directly; the stored
  args/kwargs can be used to build the real instance later.
  """

  def __init__(self, *args, **kwargs):
    # Just remember the positional and keyword arguments as given.
    self.args = args
    self.kwargs = kwargs


#
# Verifier classes
#

class _PyUnitCompatibleVerifier(unittest.TestCase):
  """Adapter that lets SimpleVerifier borrow unittest's assert* behavior.

  This isn't meant to be public.  Only PyUnitCompatibleTest is public.
  """

  def __init__(self):
    # TestCase normally expects a test method name; call the base with no
    # arguments so this class can be instantiated directly.
    unittest.TestCase.__init__(self)

  def runTest(self):
    # Python 2.7's TestCase.__init__ requires a 'runTest' method to exist
    # (it was not meant to be subclassed this way); never actually run.
    pass


class SimpleVerifier(object):
  """An object with a bunch of methods that verify conditions.

  The verifiers here correspond roughly to the assert* methods available in
  unittest; the implementations are bound methods borrowed from a shared
  _PyUnitCompatibleVerifier instance (see the class attributes below).
  """
  # Verifiers can have arbitrary labels.  Test.GetTestMethods matches these
  # against a method's only_verify/no_verify labels.
  LABELS = []

  # This has no state, so it can be a class variable.
  # NOTE: must be created before the bound-method aliases (Equal, ...) below.
  _dummy = _PyUnitCompatibleVerifier()

  def __init__(self, log=None):
    # The test method currently being run; set by BeforeMethod.
    self.current_method = None
    self.log = log or DefaultLog()

  def BeforeMethod(self, method):
    """Called before each test method (before its setUp).

    By default we put the currently running method in self.current_method.  This
    means that the verifiers aren't thread-safe -- you need to instantiate one
    per thread.

    This is done so that the verifiers can see the name of the test method, the
    docstring, which is useful for generating docs.

    They might also want to see the labels, so they can choose how to run the
    test, perhaps?  Again, for documentation generation, you might only want to
    generate docs for properly labeled methods.

    Or you might want a 'version' label, and the verifiers would do different
    things based on the version.

    I have a feeling that this can be misused, so I'm trying to justify it.
    """
    self.current_method = method

  def AfterMethod(self, method):
    """Called after each test method, before the tearDown."""
    pass

  # Bound methods of the shared dummy TestCase, exposed under testy names,
  # e.g. self.verify.Equal(...)
  Equal = _dummy.assertEqual
  Raises = _dummy.assertRaises
  IsTrue = _dummy.assertTrue
  IsFalse = _dummy.assertFalse
  fail = _dummy.fail

  # TODO: Add more of these...


def _StripLines(s):
  """Strip leading/trailing whitespace from every line of s."""
  stripped = [line.strip() for line in s.splitlines()]
  return '\n'.join(stripped)


class StandardVerifier(SimpleVerifier):
  """SimpleVerifier extended with more commonly useful verifiers."""

  def CheckIfRunnable(self):
    """Hook: raise here if the test can't be run at all.

    Called *before* any tests are run, so that a missing prerequisite (say,
    the JVM) is reported at the beginning instead of 20 minutes into the run.
    """
    pass

  def setUpOnce(self):
    """Called by the owning Test instance at the start of each of its runs."""
    pass

  def tearDownOnce(self):
    """Called by the owning Test instance at the end of each of its runs."""
    pass

  def In(self, needle, haystack):
    """Verify membership: needle in haystack."""
    self.IsTrue(needle in haystack, '%s not in %s' % (needle, haystack))

  def RegexGroups(self, regex, s, expected_groups):
    """Verify that a regular expression returns the given groups.

    Args:
      regex: A regex object, or a pattern string (compiled with no options).
      s: The input string to match the regex against.
      expected_groups: Tuple of expected groups, or None if the regex
          shouldn't match the string at all.
    """
    if isinstance(regex, basestring):
      regex = re.compile(regex)
    match = regex.match(s)
    if expected_groups is None:
      if match is not None:
        self.fail('Expected no match, got %s' % (match.groups(),))
      return
    if match is None:
      self.fail('Expected groups, got None')
    self.Equal(match.groups(), expected_groups)

  def RegexMatch(self, regex, s, expected):
    """Verify that a regular expression matches the given string.

    Args:
      regex: A regex object, or a pattern string (compiled with no options).
      s: The input string to match the regex against.
      expected: Expected value of match.group(0), or None if the regex
          shouldn't match the string at all.
    """
    if isinstance(regex, basestring):
      regex = re.compile(regex)
    match = regex.match(s)
    if expected is None:
      if match is not None:
        self.fail('Expected no match, got %s' % match.group(0))
      return
    if match is None:
      self.fail('Expected a match, got None')
    self.Equal(match.group(0), expected)

  def LongStringsEqual(self, left, right, ignore_whitespace=False,
      ignore_all_whitespace=False):
    """Verify two long strings are equal, with a multiline diff on failure.

    Args:
      ignore_whitespace: strip leading/trailing whitespace on a line-by-line
          basis before comparing.
      ignore_all_whitespace: strip ALL internal whitespace before comparing.
    """
    # TODO: Fold this into testutils, perhaps
    if ignore_whitespace:
      left, right = _StripLines(left), _StripLines(right)
    if ignore_all_whitespace:
      left = re.sub(r'\s+', '', left)
      right = re.sub(r'\s+', '', right)
    testutils.assertMultilineEqual(self._dummy, left, right)

  def EqualAsJson(self, left, right, ignore_whitespace=False):
    """Compares two values as JSON strings.

    Shows a multiline diff if they're not equal.

    Any non-string argument is first encoded as JSON with indent 2 and keys
    sorted -- the "normalized form".

    Caveats: The Python dictionaries {1: 3} and {"1": 3} are equal, because in
    JSON, keys must be strings.
    """
    if not isinstance(left, basestring):
      left = json.dumps(left, indent=2, sort_keys=True)
    if not isinstance(right, basestring):
      right = json.dumps(right, indent=2, sort_keys=True)
    # Ignoring whitespace makes "hierarchy level" diffs easier to debug: a
    # given value isn't wrong, it's just in the wrong place in the tree.
    self.LongStringsEqual(left, right, ignore_whitespace=ignore_whitespace)


#
# Decorators
#


# TODO: It might be nice to predefine some labels, like:
# @documentation
def labels(*label_args):
  """Decorator that attaches the given labels to a test method.

  The labels are stored on the function's .labels attribute, where the
  test-method filters can find them.
  """
  def _Attach(f):
    f.labels = label_args
    return f
  return _Attach


def only_verify(*label_args):
  """Decorator: run this method only with verifiers carrying these labels.

  Stored on the function's .only_verify attribute, consumed by
  Test.GetTestMethods.
  """
  def _Attach(f):
    f.only_verify = label_args
    return f
  return _Attach


def no_verify(*label_args):
  """Decorator: do NOT run this method with verifiers carrying these labels.

  Stored on the function's .no_verify attribute, consumed by
  Test.GetTestMethods.
  """
  def _Attach(f):
    f.no_verify = label_args
    return f
  return _Attach
  

def HasLabel(test_method, label_name):
  """Return True if the test method carries the given label."""
  # Methods never decorated with @labels have no .labels attribute at all.
  return label_name in getattr(test_method, 'labels', ())


#
# Test classes
#

class Test(object):
  """Test objects can be used to share state between individual test cases.

  Well, actually it's not "state", but usually constant objects that are
  initialized (to save initialization cost).

  A Test can also be used to group test cases that can be run in multiple ways
  (e.g. with Python 2.{4,5,6}, or against different implementations of the same
  library (Pure Python/C-accelerated Python/JavaScript/whatever).  In this case,
  they should be instantiated with multiple verifiers.

  TODO: Tests should be able to require Fixtures (Setup/TearDown pairs).
  """

  # Used to instantiate the class.
  VERIFIERS = [StandardVerifier]

  # Used to locate testdata, relative to the module that the test class lives
  # in.
  DATA_RELATIVE_PATH = 'testdata'

  def __init__(self, verifier=None, data=None, log=None):
    # self.verify is the standard naming convention.  By default, instantiate
    # the first VERIFIERS entry with no arguments (StandardVerifier unless
    # overridden).
    self.verify = verifier or self.VERIFIERS[0]()

    if data:
      self.data = data
    else:
      # Locate testdata relative to the module that defines this class.  Is
      # there a simpler way to find that module's file?
      module_file = sys.modules[self.__class__.__module__].__file__
      data_dir = os.path.join(os.path.dirname(module_file),
                              self.DATA_RELATIVE_PATH)
      self.data = testdata.DiskDataLoader(data_dir)

    self.log = log or DefaultLog()

  def setUp(self):
    """Per-method setup hook; override as needed."""
    pass

  def tearDown(self):
    """Per-method teardown hook; override as needed."""
    pass

  def setUpOnce(self):
    """Per-run setup; delegates to the verifier."""
    self.verify.setUpOnce()

  def tearDownOnce(self):
    """Per-run teardown; delegates to the verifier."""
    self.verify.tearDownOnce()

  def BeforeMethod(self, method):
    """Should be called before running a test method."""
    self.verify.BeforeMethod(method)

  def AfterMethod(self, method):
    """Should be called after running a test method."""
    self.verify.AfterMethod(method)

  def __repr__(self):
    return '%s with verifier %s' % (
        self.__class__.__name__, self.verify.__class__.__name__)

  def GetTestMethods(self, test_method_filter):
    """Yield the test methods to run on this Test instance.

    Looks at our verifier and respects the only_verify and no_verify labels.
    """
    verifier_labels = set(self.verify.LABELS)

    for method in GetTestMethods(self, test_method_filter):
      # Skip this method if any of its no_verify labels matches a verifier
      # label.
      if verifier_labels & set(getattr(method, 'no_verify', [])):
        continue

      # Skip this method if it has only_verify labels and none of them
      # matches a verifier label.
      only = set(getattr(method, 'only_verify', []))
      if only and not (verifier_labels & only):
        continue

      yield method


class PyUnitCompatibleTest(Test):
  """Eases conversion of old unittest.TestCase-style tests.

  Exposes the familiar assert* names, delegating to our verifier.
  """

  def __init__(self, verifier, log=None):
    Test.__init__(self, verifier, log=log)

  # TODO: Need a lot more of these... Maybe map __getattr__ to look up PyUnit
  # methods

  def assertEqual(self, *args, **kwargs):
    self.verify.Equal(*args, **kwargs)

  def assertRaises(self, *args, **kwargs):
    self.verify.Raises(*args, **kwargs)

  def assert_(self, *args, **kwargs):
    self.verify.IsTrue(*args, **kwargs)


#
# End Test classes
#


def _GetSubclasses(module, base_class):
  """Yield every class defined in 'module' that subclasses base_class."""
  members = inspect.getmembers(module, inspect.isclass)
  for _, cls in members:
    if issubclass(cls, base_class):
      yield cls


def MakeTestClassFilter(regex=None, label=None):
  """Build a predicate over Test *classes*.

  Args:
    regex: A regular expression string, searched against the class name.
    label: If given, the class's LABELS must contain it.

  Returns:
    A predicate that takes a *class* and returns True if it should be run.
  """
  compiled = re.compile(regex) if regex else None

  def Filter(cls):
    if label and label not in getattr(cls, 'LABELS', []):
      return False
    if compiled and not compiled.search(cls.__name__):
      return False
    return True

  return Filter


def GetTestClasses(module, test_class_filter=lambda x: True):
  """Return the Test *classes* in a module that pass the filter."""
  result = []
  for cls in _GetSubclasses(module, Test):
    if test_class_filter(cls):
      result.append(cls)
  return result


def _MakeTestMethodFilter(regex, filter_label):
  """Build a predicate over test *methods*.

  Args:
    regex: A regular expression string, searched against the method name.
    filter_label: If given, the method's labels must contain it.
  """
  compiled = re.compile(regex) if regex else None

  def Filter(method):
    if filter_label and filter_label not in getattr(method, 'labels', []):
      return False
    if compiled and not compiled.search(method.__name__):
      return False
    return True

  return Filter


def GetTestMethods(test, test_method_filter):
  """Yield the test *methods* of a Test instance.

  Args:
    test: A Test() instance.
    test_method_filter: Predicate that says whether to run a given method.

  Methods are discovered by the default naming convention: bound methods
  whose names start with 'test'.
  """
  for name, method in inspect.getmembers(test, inspect.ismethod):
    if name.startswith('test') and test_method_filter(method):
      # TODO: duplicate __name__ for printing as a Record
      yield method



def _DescribeMethod(method, test):
  """One-line description: method name plus the test's verifier class name."""
  verifier_name = test.verify.__class__.__name__
  return '%s %s' % (method.__name__, verifier_name)


class TestVisitor(object):
  """Abstract base class for objects that walk over tests.

  The Open*/Close* pairs are the events that happen while searching over a
  module/source tree for Test *instances* to run; the default implementations
  do nothing.

  Example implementations:
    TestLister
    SerialTestRunner
    ParallelTestRunner: Puts things in a queue to be read by worker threads
    DistributedTestRunner: Puts things in a message queue to be read by
      distributed worker processes
  """

  def OpenTestRun(self):
    """Fired once, before anything else."""

  def CloseTestRun(self):
    """Fired once, after everything else."""

  def OpenModule(self, module):
    """Fired before visiting a module's tests."""

  def CloseModule(self, module):
    """Fired after visiting a module's tests."""

  def OpenTest(self, test):
    """Fired before visiting a Test instance's methods."""

  def CloseTest(self, test):
    """Fired after visiting a Test instance's methods."""

  def OnTestMethod(self, method):
    """Fired once per test method."""

  def Success(self):
    """Returns whether the visit succeeded."""
    raise NotImplementedError


class TestLister(TestVisitor):
  """Visitor that shows tests without running them."""

  def __init__(self, log):
    self.log = log

  def OpenModule(self, module):
    # Announce the module and indent everything under it.
    self.log.info("Module %r", module)
    self.log.push()

  def CloseModule(self, module):
    self.log.pop()

  def OpenTest(self, test):
    # Announce the test and indent its methods.
    self.log.info("Test %r", test)
    self.log.push()

  def CloseTest(self, test):
    self.log.pop()

  def OnTestMethod(self, method):
    self.log.info("Method %s", method.__name__)

  def Success(self):
    """Returns whether the visit succeeded -- listing always does."""
    return True


class TestDisassembler(TestLister):
  """Visitor that shows each test method's Python disassembly (no running)."""

  def OnTestMethod(self, method):
    # Print the method name like TestLister does, then the bytecode
    # surrounded by blank lines.
    TestLister.OnTestMethod(self, method)
    self.log.info('')
    dis.dis(method)
    self.log.info('')


class RunnableCheck(TestVisitor):
  """Checks that tests are runnable, before running any of them.

  If there are a lot of runnable tests before an unrunnable one, this keeps
  people from wasting their time.
  """

  def __init__(self, log):
    self.log = log
    # A set, because two tests can share the same verifier (and thus the same
    # bound CheckIfRunnable method).
    self.checks = set()

  def OpenTest(self, test):
    # set.add is a no-op for a function already collected.
    self.checks.add(test.verify.CheckIfRunnable)

  def CloseTestRun(self):
    # Run every distinct check; any of them may raise a hard error.
    for check in self.checks:
      check()


class SerialTestRunner(TestLister):
  """Runs tests in process (in the Python interpreter).

  For now this is a subclass of TestLister, because we inherit all its
  behavior.
  """

  def __init__(self, log=None):
    self.log = log or DefaultLog()

    # Each entry in failures/errors is (method, test, traceback string).
    self.failures = []
    self.errors = []
    self.successes = []

    # So that methods know which test they're a part of
    self.current_test = None

  def _FormatTrace(self, exc_info):
    """Format an (exc_type, exc_value, tb) triple as a traceback string.

    BUG FIX: the old code called traceback.format_exc(exc_info), but
    format_exc takes a 'limit' argument, not an exc_info tuple; format from
    the tuple explicitly instead.
    """
    return ''.join(traceback.format_exception(*exc_info))

  def HandleFailure(self, method, exc_info):
    """Record an assertion failure raised by a test method."""
    self.log.progress('F')
    self.failures.append(
        (method, self.current_test, self._FormatTrace(exc_info)))

  def HandleError(self, method, exc_info):
    """Record an unexpected exception raised by a test method."""
    self.log.progress('E')
    self.errors.append(
        (method, self.current_test, self._FormatTrace(exc_info)))

  def HandleSuccess(self, method):
    """Record a passing test method."""
    self.log.progress('.')
    self.successes.append(method)

  def ShowSummary(self):
    """Print tracebacks for errors and failures, then pass/fail counts."""
    # TODO: Print the names of the test cases here, etc.
    for method, test, trace in self.errors:
      self.log.info('-' * 80)
      self.log.info('*** ERROR %s', _DescribeMethod(method, test))
      self.log.info(trace)
      self.log.info('')

    # TODO: Print the names of the test cases here, etc.
    for method, test, trace in self.failures:
      self.log.info('-' * 80)
      self.log.info('*** FAILED %s', _DescribeMethod(method, test))
      self.log.info(trace)
      self.log.info('')

    # TODO: Could use a text template here, with widths
    # TODO: Report number of Tests, test cases, verifications, verifiers, etc.
    self.log.info('Summary')
    self.log.info('-------')
    self.log.info('Unexpected errors: %s', len(self.errors))
    self.log.info('Failing: %s', len(self.failures))
    self.log.info('Passing: %s', len(self.successes))

  def _RunSafely(self, method):
    """Run a zero-argument callable, recording any exception as an error."""
    try:
      method()
    except KeyboardInterrupt:
      raise
    except Exception:
      self.HandleError(method, sys.exc_info())

  def OpenTestRun(self):
    self.log.info('Running tests')

  def CloseTestRun(self):
    self.log.progress('\n')
    self.ShowSummary()

  def OpenModule(self, module):
    TestLister.OpenModule(self, module)

  def CloseModule(self, module):
    TestLister.CloseModule(self, module)

  def OpenTest(self, test):
    TestLister.OpenTest(self, test)
    self.current_test = test
    # If setUpOnce/setUp/tearDown/tearDownOnce raise exceptions, just let the
    # whole test run die.  The test method results will be meaningless.
    # TODO: Maybe use some other type of error reporting?
    test.setUpOnce()

  def CloseTest(self, test):
    test.tearDownOnce()
    self.current_test = None
    TestLister.CloseTest(self, test)

  def RunMethod(self, method):
    """This exists for subclasses to override."""
    method()

  def OnTestMethod(self, method):
    """Run one test method between setUp/tearDown, recording the outcome."""
    self.log.push()
    self.current_test.BeforeMethod(method)
    self.current_test.setUp()

    try:
      self.RunMethod(method)
    except KeyboardInterrupt:
      raise
    except AssertionError:
      self.HandleFailure(method, sys.exc_info())
    except Exception:
      self.HandleError(method, sys.exc_info())
    else:
      self.HandleSuccess(method)

    self.current_test.AfterMethod(method)
    self.current_test.tearDown()

    self.log.pop()

  def Success(self):
    """Returns whether the visit succeeded: no errors and no failures."""
    return not self.errors and not self.failures


class TimeitTestRunner(SerialTestRunner):
  """For each test method, uses the timeit module to measure performance."""

  def __init__(self, log, times):
    # times: number of calls per timing run; repeats: number of timing runs.
    SerialTestRunner.__init__(self, log)
    self.times = times
    self.repeats = 3

  def RunMethod(self, method):
    """Time a test method with timeit instead of just calling it.

    TODO: Might want to provide an option to run the method many times.
    """
    self.log.info(
        'Timing %s (%s repeats, %s times each)', method.__name__, self.repeats,
        self.times)
    # This is really silly -- timeit executes in its own namespace, and
    # doesn't have a mechanism to inject function names.  So we plant the
    # method on the timeit module itself, where (presumably) the timed
    # statement can resolve it -- relies on timeit internals; verify on
    # upgrade.
    #
    # The timeit module is really meant for micro-snippets of code, where
    # function call overhead is important.  TODO: Probably want to provide
    # another timing method that doesn't use timeit, for macro-timing.
    #
    # See http://psf.upfronthosting.co.za/roundup/tracker/issue2527
    timeit.fake_timeit_method = method
    s = "fake_timeit_method()"
    timer = timeit.Timer(s)
    results = timer.repeat(self.repeats, number=self.times)
    self.log.info('Results: %s', results)


class ProfilingTestRunner(SerialTestRunner):
  """For each test method, writes a profile to disk in the given directory."""

  def __init__(self, log, profile_dir, times):
    # BUG FIX: initialize the base class (which sets self.log) *before*
    # touching self.log; previously the warning below raised AttributeError
    # whenever the slow 'profile' module was in use.
    SerialTestRunner.__init__(self, log)

    if profile_module_warning:
      self.log.warning(
          'Using slow Python 2.4 profile module; running under Python 2.5 '
          'may give better results')

    self.profile_dir = profile_dir
    self.times = times

  def RunMethod(self, method):
    """Run a test method under the built-in profiler.

    TODO: Might want to provide an option to run the method many times.
    """
    filename = os.path.join(self.profile_dir, '%s.prof' % method.__name__)
    self.log.info('Profiling %s (%s times)', method.__name__, self.times)
    # TODO: I could inline this too?
    s = "for i in xrange(%s): method()" % self.times
    profile.runctx(s, globals(), locals(), filename=filename)
    self.log.info('Wrote profiling data to %s', filename)


def ShowProfile(filename):
  """Print the top 25 entries (by cumulative time) of a saved profile.

  Quick and dirty method.
  """
  import pstats
  stats = pstats.Stats(filename)
  stats.sort_stats('cumulative')
  stats.print_stats(25)


# TODO: Choose methods, fixtures, verifiers
# TODO: Then move them out into pipes.  Finding tests, filtering, and running,
# should all be separate steps!
# Command-line parameters accepted by RunTests/RunModules; parsed with
# cmdapp.ParseArgv when the caller doesn't pass an options record.
TEST_RUN_PARAMS = [
    # Filters that select which tests/methods to visit.
    params.OptionalString(
        'method-regex', shortcut='m',
        help='Regex to filter test method names by'),
    params.OptionalString(
        'test-regex', shortcut='t',
        help='Regex to filter Test class names by'),
    # TODO: Filter by arbitrary labels (negation, etc.)
    params.OptionalString(
        'label', shortcut='l', help='Run tests with this label'),

    # TODO: need regex validator for each entry: r'2\.[0-6]'
    # TODO: default for RepeatedStrings doesn't work, but it's useful here.
    params.RepeatedStrings(
        'py-versions', help='Python interpreters to run with', default=['2.5']),

    # Various methods for visiting tests (see _SetupVisitor for how these
    # choose the TestVisitor implementation).
    params.OptionalBoolean(
        'list', help='Lists tests, without running them'),
    params.OptionalBoolean(
        'dis', help='Print Python disassembly instead of running tests'),
    params.OptionalString(
        'profile-dir', help='Profile test methods and put output here'),
    params.OptionalBoolean(
        'timeit', help='Time the test methods using the timeit module'),

    params.OptionalInteger(
        'times', default=1000,
        help='Number of repetitions for --timeit and --profile-dir'),

    # This has nothing to do with running tests, but it analyzes the output of
    # --profile-dir.
    params.OptionalString(
        'show-profile', help='Summarize the profile information in this file'),
    ]


def VisitTests(visitor, tests, test_method_filter):
  """Visit each Test instance in 'tests' with the given visitor.

  Args:
    visitor: TestVisitor instance.
    tests: A list of Test instances.
    test_method_filter: Predicate selecting which test methods to visit.
  """
  for test in tests:
    # Bracket each test's methods with OpenTest/CloseTest events.
    visitor.OpenTest(test)
    for method in test.GetTestMethods(test_method_filter):
      visitor.OnTestMethod(method)
    visitor.CloseTest(test)


def VisitTestModules(
    visitor, modules, test_class_filter, test_method_filter, log):
  """Visit every Test class found in the given modules.

  Args:
    visitor: TestVisitor instance
    modules: A list of module objects to run tests on
    test_class_filter:
        A predicate that accepts a Test *class* and says whether we should
        visit it or not
    test_method_filter:
        A predicate that accepts a test *method* and says whether we should
        visit it or not
    log: Logger instance to pass to instantiated Tests and Verifiers.
  """
  visitor.OpenTestRun()

  for module in modules:
    visitor.OpenModule(module)

    # Instantiate each matching Test class once per verifier it declares.
    tests = []
    for test_class in GetTestClasses(module, test_class_filter):
      for verifier_class in test_class.VERIFIERS:
        # TODO: This doesn't work with verifiers with arguments
        tests.append(test_class(verifier_class(log=log), log=log))

    VisitTests(visitor, tests, test_method_filter)
    visitor.CloseModule(module)

  visitor.CloseTestRun()


def DefaultLog():
  """Construct the logger used when the caller doesn't supply one."""
  return log.Logger()


#
# Free functions for running tests
#

def RunTests(tests, options=None, log=None):
  """Use the default test runner to run tests.

  Args:
    tests: Test instances.
    options: A record instantiated with values from TEST_RUN_PARAMS; if None,
        sys.argv is parsed.
    log: Logger instance; a default one is created if not given.

  Returns:
    False if any test failed, true otherwise.
  """
  if options is None:
    options = cmdapp.ParseArgv(sys.argv[1:], TEST_RUN_PARAMS)

  log = log or DefaultLog()

  test_method_filter = _MakeTestMethodFilter(options.method_regex,
                                             options.label)
  visitor = _SetupVisitor(log, options)

  # Check that every test is runnable before executing any, so a missing
  # prerequisite is reported up front.  TODO: Might not want to do this in
  # --list mode (in RunModules too)
  checker = RunnableCheck(log)
  checker.OpenTestRun()
  VisitTests(checker, tests, test_method_filter)
  checker.CloseTestRun()

  visitor.OpenTestRun()
  VisitTests(visitor, tests, test_method_filter)
  visitor.CloseTestRun()

  return visitor.Success()


def _SetupVisitor(log, options):
  """Choose and construct a TestVisitor from command-line options.

  Also handles --show-profile, which just prints a saved profile and exits
  the process immediately (it has nothing to do with running tests).
  """
  # This is common code
  if options.show_profile:
    ShowProfile(options.show_profile)
    log.warning('Done showing profile; exiting.')
    sys.exit()

  # Pick the visitor implementation; SerialTestRunner is the default.
  if options.list:
    return TestLister(log)
  if options.dis:
    return TestDisassembler(log)
  if options.profile_dir:
    return ProfilingTestRunner(log, options.profile_dir, options.times)
  if options.timeit:
    return TimeitTestRunner(log, options.times)
  return SerialTestRunner(log)


def RunThisModule(options=None, log=None):
  """Run the tests in the __main__ module, then exit the process.

  Meant to be used like this:

  if __name__ == '__main__':
    testy.RunThisModule()  # Exits
  """
  main_module = __import__('__main__')
  success = RunModules([main_module], options=options, log=log)
  # Exit status 0 on success, 1 on failure.
  sys.exit(not success)


def RunModules(modules, options=None, log=None):
  """Find and run every Test class in the given modules.

  Each Test class is instantiated in the "default" way, using its VERIFIERS
  class attribute (with just a logger).  This makes testy and unittest
  similar with respect to the definition of a test module.  If the module
  uses a more complicated pattern, then use RunTests() with a list of
  instantiated tests.

  Args:
    modules: A list of module objects.
    options: Options for running the test.  If None, then sys.argv is parsed
        with TEST_RUN_PARAMS.
    log: Logger instance; a default one is created if not given.

  Returns:
    True if all tests passed.
  """
  if options is None:
    options = cmdapp.ParseArgv(sys.argv[1:], TEST_RUN_PARAMS)

  # TODO: Need options for logging
  # Parse log.PARAMS above -- but need a mode for the ParseArgv to be able to
  # ignore undefined parameters.  And it could use a prefix
  log = log or DefaultLog()

  method_filter = _MakeTestMethodFilter(options.method_regex, options.label)
  class_filter = MakeTestClassFilter(options.test_regex, options.label)

  visitor = _SetupVisitor(log, options)

  # Runnable check first, then the real visit.
  checker = RunnableCheck(log)
  VisitTestModules(checker, modules, class_filter, method_filter, log)
  VisitTestModules(visitor, modules, class_filter, method_filter, log)

  return visitor.Success()


def RunInNewInterpreter(version, argv=sys.argv):
  """Re-run this program under a different Python interpreter.

  Args:
    version: A (major, minor) pair, e.g. (2, 6), substituted into the
        interpreter path.
    argv: argv of the program to run; argv[0] is the script path.  (Default
        is bound to sys.argv at import time, matching the old behavior.)

  Returns:
    The child interpreter's exit code.  BUG FIX: previously the exit code
    was computed but silently dropped, so callers couldn't propagate
    failures.
  """
  import subprocess

  # Try to find the new interpreter based on this one
  exe = os.path.realpath(sys.executable)
  # TODO: Windows
  new_interpreter = exe.replace('2.5', '%s.%s' % version)
  new_argv = [new_interpreter, argv[0]]
  return subprocess.call(new_argv)
