# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import logging
import re
import time

from webkitpy.layout_tests.controllers import test_result_writer
from webkitpy.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_results import TestResult
from webkitpy.layout_tests.views import printing


_log = logging.getLogger(__name__)

def run_multiple_tests(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
    """Convenience entry point: builds a MultipleTestRunner for the batch and runs it."""
    return MultipleTestRunner(port, options, results_directory, worker_name,
                              driver, test_input, stop_when_done).run()

class MultipleTestRunner(object):
    """Runs a batch of layout tests through a single driver session.

    Reference tests in the batch are currently not executed; they are
    reported as SKIP results.  All other tests are run through the driver's
    multi-test mode and either compared against their stored baselines or,
    when --reset-results is given, used to rewrite those baselines.
    """

    # Possible destinations for a newly written baseline file.
    (ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')

    # First line of a render-tree dump.  Render-tree text output is
    # platform-specific, so its baseline belongs in the platform directory.
    _render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")

    def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done):
        """Initializes the runner.

        Args:
          port: The Port object for the platform under test.
          options: Parsed command-line options.
          results_directory: Directory where result artifacts are written.
          worker_name: Name of the worker running this batch (used in logging).
          driver: The Driver used to execute the tests.
          test_input: Non-empty list of TestInput objects; the timeout and
              test name of the first entry serve as batch-wide defaults.
          stop_when_done: Whether the driver should shut down after this batch.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._results_directory = results_directory
        self._driver = driver
        # The first test's timeout and name stand in for the whole batch.
        self._timeout = test_input[0].timeout
        self._worker_name = worker_name
        self._test_name = test_input[0].test_name
        self._test_set = test_input
        self._num_tests = len(test_input)
        # Pixel tests are not currently enabled for multi-test runs.
        self._should_run_pixel_test = False
        self._reference_files = None
        self._stop_when_done = stop_when_done
        self._test_result = []

    def _expected_driver_output(self, test_set):
        """Returns a list of DriverOutputs holding each test's expected results."""
        expected_driver_output = []
        for test in test_set:
            expected_driver_output.append(DriverOutput(self._port.expected_text(test.test_name),
                                                       self._port.expected_image(test.test_name),
                                                       self._port.expected_checksum(test.test_name),
                                                       self._port.expected_audio(test.test_name)))
        return expected_driver_output

    def _driver_input(self, test_set):
        """Returns a list of DriverInputs for the given tests."""
        # The image hash is used to avoid doing an image dump if the
        # checksums match, so it should be set to a blank value if we
        # are generating a new baseline.  (Otherwise, an image from a
        # previous run will be copied into the baseline.)
        driver_inputs = []
        image_hash = None
        # Expected checksum calculation should be added here if we
        # enable pixel tests.
        for test in test_set:
            driver_inputs.append(DriverInput(test.test_name, self._timeout, image_hash, self._should_run_pixel_test))
        return driver_inputs

    def _process_ref_tests(self):
        """Splits the batch into (ref_tests, non_ref_tests), preserving order."""
        ref_tests = []
        non_ref_tests = []
        for test in self._test_set:
            if test.reference_files:
                ref_tests.append(test)
            else:
                non_ref_tests.append(test)
        return ref_tests, non_ref_tests

    def run(self):
        """Runs the batch.

        For ref-tests, the result object is generated in this method (as a
        SKIP, since ref-tests are currently disabled).  For non-ref tests the
        result comes from actually executing them through the driver.

        Returns a (list of TestResults, number of tests accounted for) pair.
        """
        num_ref_tests = 0
        non_ref_iter = 0
        num_of_tests_ran = 0
        # Initialize so the list is defined even when there are no non-ref
        # tests to run (previously this was left unbound on that path).
        non_ref_test_result = []
        ref_tests, non_ref_tests = self._process_ref_tests()

        # Rebaselining with options --new-baseline or --reset-results.
        if self._options.reset_results:
            non_ref_test_result, num_of_tests_ran = self._run_rebaseline(non_ref_tests)
        elif non_ref_tests:
            non_ref_test_result, num_of_tests_ran = self._run_compare_test(non_ref_tests)

        # Merge results back into the original batch order.
        for test in self._test_set:
            if test.reference_files:
                if self._port.get_option('no_ref_tests') or self._options.reset_results:
                    reftest_type = set([reference_file[0] for reference_file in test.reference_files])
                    result = TestResult(test.test_name, reftest_type=reftest_type)
                    result.type = test_expectations.SKIP
                    self._test_result.append(result)
                    num_ref_tests += 1
                # Currently the ref_tests are not enabled. Use the below method to
                # enable them. The result object generated should be per test, which
                # should then be added to self._test_result similar to the above case.
            elif test.test_name == non_ref_tests[non_ref_iter].test_name:
                # If a test timed out during execution, the number of test
                # results will be less than the number of tests in the
                # non_ref_tests list; results past that point are dropped.
                if non_ref_iter < num_of_tests_ran:
                    self._test_result.append(non_ref_test_result[non_ref_iter])
                    non_ref_iter += 1
        return self._test_result, (num_of_tests_ran + num_ref_tests)

    def _run_compare_test(self, test_set):
        """Runs the tests and compares each actual output against its baseline.

        Returns a (list of TestResults, number of tests the driver ran) pair.
        """
        driver_output, num_tests_ran = self._driver.run_multiple_tests(self._driver_input(test_set), self._stop_when_done)
        expected_driver_output = self._expected_driver_output(test_set)
        test_result = []

        # For multi-test runner mode, driver_output is a list of DriverOutput objects.
        for x in range(num_tests_ran):
            if self._options.ignore_metrics:
                expected_driver_output[x].strip_metrics()
                driver_output[x].strip_metrics()
            result = self._compare_output(test_set[x].test_name, expected_driver_output[x], driver_output[x])
            if self._options.new_test_results:
                self._add_missing_baselines(result, driver_output[x], test_set[x].test_name)
            test_result.append(result)
            test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, test_set[x].test_name, driver_output[x], expected_driver_output[x], result.failures)
        return test_result, num_tests_ran

    def _run_rebaseline(self, test_set):
        """Runs the tests and overwrites their baselines with the new output.

        Returns a (list of TestResults, number of tests the driver ran) pair.
        """
        driver_output, num_tests_ran = self._driver.run_multiple_tests(self._driver_input(test_set), self._stop_when_done)
        test_result = []
        for x in range(num_tests_ran):
            failures = self._handle_error(driver_output[x])
            test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, test_set[x].test_name, driver_output[x], None, failures)
            # FIXME: If the test crashed or timed out, it might be better to
            # avoid writing new baselines.
            self._overwrite_baselines(driver_output[x], test_set[x].test_name)
            test_result.append(TestResult(test_set[x].test_name, failures, driver_output[x].test_time, driver_output[x].has_stderr(), pid=driver_output[x].pid))
        return test_result, num_tests_ran

    def _overwrite_baselines(self, driver_output, test_name):
        """Writes the driver's actual output over the test's existing baselines."""
        location = self.VERSION_DIR if self._options.add_platform_exceptions else self.UPDATE
        self._save_baseline_data(driver_output.text, '.txt', location, test_name)
        self._save_baseline_data(driver_output.audio, '.wav', location, test_name)
        if self._should_run_pixel_test:
            self._save_baseline_data(driver_output.image, '.png', location, test_name)

    def _add_missing_baselines(self, test_result, driver_output, test_name):
        """Creates baseline files for any results the test was missing."""
        missing_image = test_result.has_failure_matching_types(test_failures.FailureMissingImage, test_failures.FailureMissingImageHash)
        if test_result.has_failure_matching_types(test_failures.FailureMissingResult):
            self._save_baseline_data(driver_output.text, '.txt', self._location_for_new_baseline(driver_output.text, '.txt'), test_name)
        if test_result.has_failure_matching_types(test_failures.FailureMissingAudio):
            self._save_baseline_data(driver_output.audio, '.wav', self._location_for_new_baseline(driver_output.audio, '.wav'), test_name)
        if missing_image:
            self._save_baseline_data(driver_output.image, '.png', self._location_for_new_baseline(driver_output.image, '.png'), test_name)

    def _save_baseline_data(self, data, extension, location, test_name):
        """Writes `data` as the `-expected` file for `test_name` at `location`.

        Raises AssertionError if `location` is not one of the four class
        constants.  A None `data` is a no-op.
        """
        if data is None:
            return
        port = self._port
        fs = self._filesystem
        if location == self.ALONGSIDE_TEST:
            output_dir = fs.dirname(port.abspath_for_test(test_name))
        elif location == self.VERSION_DIR:
            output_dir = fs.join(port.baseline_version_dir(), fs.dirname(test_name))
        elif location == self.PLATFORM_DIR:
            output_dir = fs.join(port.baseline_platform_dir(), fs.dirname(test_name))
        elif location == self.UPDATE:
            output_dir = fs.dirname(port.expected_filename(test_name, extension))
        else:
            raise AssertionError('unrecognized baseline location: %s' % location)

        fs.maybe_make_directory(output_dir)
        output_basename = fs.basename(fs.splitext(test_name)[0] + "-expected" + extension)
        output_path = fs.join(output_dir, output_basename)
        _log.info('Writing new expected result "%s"' % port.relative_test_filename(output_path))
        port.update_baseline(output_path, data)

    def _location_for_new_baseline(self, data, extension):
        """Returns the class constant naming where a new baseline should go."""
        if self._options.add_platform_exceptions:
            return self.VERSION_DIR
        if extension == '.png':
            return self.PLATFORM_DIR
        if extension == '.wav':
            return self.ALONGSIDE_TEST
        # Render-tree dumps are platform-specific; other text is not.
        if extension == '.txt' and self._render_tree_dump_pattern.match(data):
            return self.PLATFORM_DIR
        return self.ALONGSIDE_TEST

    def _handle_error(self, driver_output, reference_filename=None):
        """Returns test failures if some unusual errors happen in driver's run.

        Args:
          driver_output: The output from the driver.
          reference_filename: The full path to the reference file which produced the driver_output.
              This arg is optional and should be used only in reftests until we have a better way to know
              which html file is used for producing the driver_output.
        """
        failures = []
        fs = self._filesystem
        if driver_output.timeout:
            failures.append(test_failures.FailureTimeout(bool(reference_filename)))

        if reference_filename:
            testname = self._port.relative_test_filename(reference_filename)
        else:
            testname = self._test_name

        if driver_output.crash:
            failures.append(test_failures.FailureCrash(bool(reference_filename),
                                                       driver_output.crashed_process_name,
                                                       driver_output.crashed_pid))
            if driver_output.error:
                _log.debug("%s %s crashed, (stderr lines):" % (self._worker_name, testname))
            else:
                _log.debug("%s %s crashed, (no stderr)" % (self._worker_name, testname))
        elif driver_output.error:
            _log.debug("%s %s output stderr lines:" % (self._worker_name, testname))
        # Guard against a None error attribute; for any string (including "")
        # this logs exactly the same lines as iterating unconditionally.
        if driver_output.error:
            for line in driver_output.error.splitlines():
                _log.debug("  %s" % line)
        return failures

    def _compare_output(self, test_name, expected_driver_output, driver_output):
        """Returns a TestResult comparing actual output against the baseline."""
        failures = []
        failures.extend(self._handle_error(driver_output))

        if driver_output.crash:
            # Don't continue any more if we already have a crash.
            # In case of timeouts, we continue since we still want to see the text and image output.
            return TestResult(test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)

        failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
        # Note: Add failure calculation for image when pixel tests are enabled.
        return TestResult(test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)

    def _compare_text(self, expected_text, actual_text):
        """Returns a list of text failures: mismatch or missing baseline."""
        failures = []
        if (expected_text and actual_text and
            # Assuming expected_text is already normalized.
            self._port.do_text_results_differ(expected_text, self._get_normalized_output_text(actual_text))):
            failures.append(test_failures.FailureTextMismatch())
        elif actual_text and not expected_text:
            failures.append(test_failures.FailureMissingResult())
        return failures

    def _get_normalized_output_text(self, output):
        """Returns the normalized text output, i.e. the output in which
        the end-of-line characters are normalized to "\n"."""
        # Running tests on Windows produces "\r\n".  The "\n" part is helpfully
        # changed to "\r\n" by our system (Python/Cygwin), resulting in
        # "\r\r\n", when, in fact, we wanted to compare the text output with
        # the normalized text expectation files.
        return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n")