from __future__ import with_statement
import os.path
from os.path import join
import sys
from optparse import OptionParser
import logging
import re
import urlparse
import hashlib
import urllib
import itertools
import cPickle
import subprocess
import time

# Reviewboard is bundled in the third_party directory. Only the diff parser is
# used.
try:
    import reviewboard
except:
    sys.path.append(join(os.path.dirname(__file__), "..", "third_party"))
from reviewboard.scmtools.hg import HgDiffParser
from reviewboard.diffviewer.parser import DiffParser

from browsertests.utils import cli_wrapper, which
from browsertests.tests.models import Status, Test, Section

log = logging.getLogger(__name__)

# utility functions

def joinposix(*paths):
    """Join path components with POSIX separators, regardless of platform."""
    separator = "/"
    return separator.join(paths)

class ImportedTest(object):
    """Base class for tests discovered during an import run.

    Subclasses (Mochitest, Layouttest, Reftest) know how to recognize their
    test files or manifest lines and how to build the corresponding Test
    database objects.

    NOTE(review): ``self.tests_dir`` and ``self.importer_manager`` are read
    by several methods but never set here; they are presumably provided
    externally by the importer (e.g. as class attributes) -- confirm
    against the caller.
    """

    # Extensions whose content is scanned as text for resource references.
    TEXT_EXTENSIONS = ["css", "html", "xhtml", "js", "xml", "svg"]

    # src="..." / href="..." attributes in markup.
    SRC_HREF_RE = re.compile("(?:src|href)\s*=\s*(?P<quote>[\"'])(?P<url>[^\"']+)(?P=quote)")
    # url(...) references in CSS.
    CSS_URL_RE = re.compile("url\(\s*[\"']?(?P<url>[^\"')]+)[\"']?\s*\)")
    # @import "..." statements in CSS.
    CSS_IMPORT_RE = re.compile("@import\s+(?P<quote>[\"'])(?P<url>[^\"']+)(?P=quote)")

    # Whether the test identifier maps directly to a file on disk.
    file_based = True

    def __init__(self, testid):
        self.testid = testid

    def __eq__(self, other):
        """Two tests of different type are equal if they have the same testid.
        The purpose is to avoid having two different tests with the same id in
        the same set even if they have a different type."""
        if not isinstance(other, ImportedTest):
            return False
        return self.testid == other.testid

    def __hash__(self):
        # Consistent with __eq__: hash only the identifier.
        return hash(self.testid)

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.testid)

    @classmethod
    def create_from_test(cls, test):
        """Dispatch on test.type to the matching ImportedTest subclass and
        rebuild the wrapper from the database object."""
        classname = test.type.capitalize()
        itest_class = sys.modules[__name__].__dict__.get(classname, None)
        assert itest_class, "Can't find class for test type %s" % test.type
        return itest_class.do_create_from_test(test)

    @classmethod
    def do_create_from_test(cls, test):
        """Default reconstruction: wrap the database primary key only."""
        itest = cls(test.id)
        return itest

    def get_test(self):
        """Return the test from the database with the same identifier.
        Note: The returned test may not be of the same type."""
        return Test.objects.get(pk=self.testid, deleted=False)

    def exists(self):
        """Whether the test is (still) present on disk; subclass
        responsibility."""
        raise NotImplementedError()

    def _get_related_resources(self, resource):
        """Crawl `resource` and return the set of all resources (paths
        relative to self.tests_dir) transitively referenced through
        src/href attributes, CSS url() and @import statements."""
        def find_resources(resource):
            full_path = join(self.tests_dir, resource)

            ext = os.path.splitext(resource)[1][1:]
            if ext not in self.TEXT_EXTENSIONS:
                return set()

            # Fixed: close the file promptly with a with-statement instead
            # of leaking the handle until garbage collection.
            with open(full_path) as f:
                content = f.read()
            iterators = [(m.groupdict()["url"] for m in regex.finditer(content)) for
                            regex in [self.SRC_HREF_RE, self.CSS_URL_RE, self.CSS_IMPORT_RE]]

            found_resources = set()
            base = "/" + os.path.dirname(resource)
            if not base.endswith("/"):
                base += "/"

            for url in itertools.chain(*iterators):
                url = urlparse.urljoin(base, url)
                # Only keep the path from the parsed url.  We check if the file
                # exists on the filesystem in order to crawl that resource.
                path = urlparse.urlparse(url).path[1:]
                if not path or not os.path.isfile(join(self.tests_dir, path)):
                    continue
                found_resources.add(path)
            return found_resources

        # Breadth-first crawl; all_resources doubles as the "seen" set so
        # cycles between resources terminate.
        tocrawl = set([resource])
        all_resources = set([resource])
        while tocrawl:
            resources = find_resources(tocrawl.pop())
            tocrawl.update(resources - all_resources)
            all_resources.update(resources)

        return all_resources

    def _add_flags(self, test, *args):
        """Add one or more flag names to the (transient) test._flags set."""
        test._flags.update(args)

    def _compute_common_flags(self, test):
        """Set flags shared by all test types, derived from the resource
        URLs: data: URLs and svg/xhtml extensions."""
        schemes = [urlparse.urlparse(u).scheme for u in test.test_resources]
        if "data" in schemes:
            self._add_flags(test, "dataurl")

        extensions = [os.path.splitext(u)[1][1:] for u in test.test_resources]
        extensions_as_flag = ("svg", "xhtml")
        for ext in extensions_as_flag:
            if ext in extensions:
                self._add_flags(test, ext)

    def _compute_flags(self, test):
        """Set type-specific flags; subclass responsibility."""
        raise NotImplementedError()

    def _compute_moz_flags(self, test):
        """Set flags for Mozilla-specific constructs found in the scanned
        text resources (XUL, privileged APIs, proxy hosts)."""
        extensions = [os.path.splitext(u)[1][1:] for u in test.test_resources]
        if "xul" in extensions:
            self._add_flags(test, "moz", "moz:xul")

        if "netscape.security" in test.resources_text_content or \
           "EventUtils.js" in test.resources_text_content:
            self._add_flags(test, "moz", "moz:security")
        if "Components.interfaces" in test.resources_text_content or \
           "Components.classes" in test.resources_text_content:
            self._add_flags(test, "moz", "moz:prop_objects")

        # XXX not accurate. It should check against the hosts in
        # build/pgo/server-locations.txt
        if "example.org" in test.resources_text_content:
            self._add_flags(test, "proxy")

    def _fixup_test(self, test, path_as_id=None):
        """Fill in the generic Test attributes: ids/urls (when path_as_id is
        given), the transitive resource set, the content hash, flags and
        metadata-driven attributes."""
        if path_as_id:
            test.id = path_as_id
            test.file = path_as_id

            # XXX not correct for http tests and ssl stuff.
            url = self.importer_manager.SERVER_URL + path_as_id
            test.url = url

        if not test.full_id:
            test.full_id = test.id

        for r in test.test_resources:
            test.resources.update(self._get_related_resources(r))

        test.resources_content = ""
        test.resources_text_content = ""

        for r in sorted(test.resources):
            res_path = join(self.tests_dir, r)
            if not os.path.exists(res_path):
                continue
            with open(res_path) as f:
                content = f.read()
                test.resources_content += content
                if os.path.splitext(r)[1][1:] not in self.TEXT_EXTENSIONS:
                    continue
                # SimpleTest.js references Mozilla specific objects, but it
                # does not use them if they are not available. So don't include
                # that file in the content scanned for Mozilla specific objects.
                if r == "tests/SimpleTest/SimpleTest.js":
                    continue
                test.resources_text_content += content

        # Content hash over every resource byte: detects changed tests.
        test.hash = hashlib.md5(test.resources_content).hexdigest()
        self._compute_common_flags(test)
        self._compute_flags(test)

        self.importer_manager.metadata.update_test(test)

        # Could be large, so free them now.
        del test.resources_content
        del test.resources_text_content

    @classmethod
    def get_imported_tests(cls, path):
        """Return the list of importable tests found at `path` (zero or one
        for file-based test types)."""
        itest = cls(path)
        if not itest.exists():
            return []
        return [itest]

    @classmethod
    def update_dirty_itests(cls, resource, lines, dirty_itests):
        """Hook for manifest-based types (see Reftest); default: `resource`
        is not a manifest, nothing handled."""
        return False

    def create_test(self):
        """Build the database Test object; subclass responsibility."""
        raise NotImplementedError()

class Mochitest(ImportedTest):
    """Imported test backed by a single mochitest file on disk."""

    def __init__(self, path):
        super(Mochitest, self).__init__(path)
        self.path = path

    def exists(self):
        """Return True when self.path names an actual mochitest file."""
        directory, filename = os.path.split(self.path)
        if not os.path.isfile(join(self.tests_dir, self.path)):
            return False

        # from mozilla/testing/mochitest/server.js :: isTest()
        # A bit more restrictive though: filename must start with "test_",
        # not only contain that string.  The "-expected.txt" check was added
        # to skip layouttests.
        if not filename.startswith("test_"):
            return False
        if ".js" in filename or ".css" in filename:
            return False
        if re.search("\^headers\^$", filename):
            return False
        return not filename.endswith("-expected.txt")

    def _compute_flags(self, test):
        """Mochitests only carry the Mozilla-specific flags."""
        self._compute_moz_flags(test)

    def create_test(self):
        """Create and fix up the database Test object for this mochitest."""
        assert self.exists()

        test = Test()
        test.type = "mochitest"
        full_path = join(self.tests_dir, self.path)
        assert os.path.exists(full_path), "path %s does not exist" % full_path

        test.resources = set()
        test.test_resources = set([self.path])
        self._fixup_test(test, path_as_id=self.path)
        return test

class Layouttest(ImportedTest):
    """Imported test backed by a WebKit layouttest and its text expectation
    file ("-expected.txt")."""

    # Strips the last extension so it can be swapped for "-expected.txt".
    REPLACE_EXT_RE = re.compile("\.[^\.]*$")
    # Matches method calls on the layoutTestController object.
    LTC_CALLS_RE = re.compile("layoutTestController\.(\w+)")

    def __init__(self, path):
        super(Layouttest, self).__init__(path)
        self.path = path

    def _pre_create_test(self):
        """Return a partially initialized Test for self.path, or None when
        the path is not a text-only layouttest (disallowed extension,
        missing expectation file, or a pixel-test expectation)."""
        dir, file = os.path.split(self.path)

        # From run-webkit-tests, $fileFilter function (and below for svg)
        ALLOWED_EXTS = ("html", "shtml", "xml", "xhtml", "pl", "php", "svg")
        ext = os.path.splitext(file)[1]
        if not ext or ext[1:] not in ALLOWED_EXTS:
            return None

        expected_file = self.REPLACE_EXT_RE.sub("", file)
        expected_file += "-expected.txt"
        expected_path = joinposix(dir, expected_file) if dir else expected_file

        if not os.path.isfile(join(self.tests_dir, expected_path)):
            return None

        test = Test()
        test.type = "layouttest"
        test.expected_path = expected_path

        full_path = join(self.tests_dir, self.path)
        assert os.path.exists(full_path), "path %s does not exist" % full_path

        # Fixed: read the expectation file through a with-statement instead
        # of leaking the file handle.
        with open(join(self.tests_dir, expected_path)) as f:
            exp_content = f.read()
        # Save this on the test for use in _compute_flags
        test.exp_content = exp_content

        # See run-webkit-tests::isTextOnlyTest()
        if exp_content.startswith("layer at"):
            return None

        return test

    def exists(self):
        """True when self.path is an importable text-only layouttest."""
        if not os.path.isfile(join(self.tests_dir, self.path)):
            return False

        test = self._pre_create_test()
        return test is not None

    def _compute_flags(self, test):
        """Set layouttest-specific flags from the expectation content and
        the scanned text resources; consumes test.exp_content."""
        if "CONSOLE MESSAGE:" in test.exp_content or "ALERT:" in test.exp_content:
            self._add_flags(test, "ltmessages")

        # TODO: replace this code by a flag set in the metadata file

        ## XXX should not be path specific
        #WEBKIT_FILE_PREFIX = "/WebKit/LayoutTests/"
        #
        #use_wk_http = use_wk_ssl = False
        ## Keep in sync with run-webkit-tests, around line 555
        #if test_file.startswith(WEBKIT_FILE_PREFIX + "http"):
        #    if not test_file.startswith(WEBKIT_FILE_PREFIX + "http/tests/local/") and \
        #       not test_file.startswith(WEBKIT_FILE_PREFIX + "http/tests/ssl/") and \
        #       not test_file.startswith(WEBKIT_FILE_PREFIX + "http/tests/media/"):
        #        use_wk_http = True
        #    elif test_file.startswith(WEBKIT_FILE_PREFIX + "http/tests/ssl/"):
        #        use_wk_ssl = True
        #if use_wk_http or use_wk_http:
        #    test.flags_set.add("layouttesthttp")
        #
        #url = self.server_url + test_file
        #url_for_fetch = url
        #if use_wk_http:
        #    url = "http://127.0.0.1:8000" + \
        #         test_file.replace(WEBKIT_FILE_PREFIX + "http", "")
        #elif use_wk_ssl:
        #    url = "http://127.0.0.1:8443" + \
        #         test_file.replace(WEBKIT_FILE_PREFIX + "http", "")
        #log.debug("url: %s", url)

        funcs = set(self.LTC_CALLS_RE.findall(test.resources_text_content))
        unprivilegedFuncs = set(["dumpAsText", "waitUntilDone", "notifyDone"])

        if "waitUntilDone" in funcs:
            self._add_flags(test, "ltwait")
        privilegedFuncs = funcs - unprivilegedFuncs
        if privilegedFuncs:
            self._add_flags(test, "layouttests")
            # TODO: save this for capability testing
            #test.pfuncs = privilegedFuncs

        # XXX \b not working?
        if re.search("\Walert\(|\Wprompt\(|\Wconfirm\(", test.resources_text_content):
            self._add_flags(test, "ltalert")

        del test.exp_content

    def create_test(self):
        """Create and fix up the database Test object for this layouttest."""
        assert self.exists()
        test = self._pre_create_test()
        dir, file = os.path.split(self.path)

        test.resources = set([test.expected_path])
        test.test_resources = set([self.path])
        self._fixup_test(test, path_as_id=self.path)
        return test

class Reftest(ImportedTest):
    """Imported test described by a single line of a reftest.list manifest.

    Reftests are not backed by one test file: the identifier is derived
    from the manifest location plus (a hash of) the manifest line.
    """

    file_based = False
    # When True, the test id embeds an md5 of the manifest line instead of
    # the raw line itself (keeps ids short and filesystem-safe).
    hash_id = True

    def __init__(self, testid, full_id):
        super(Reftest, self).__init__(testid)
        self.full_id = full_id

    @classmethod
    def do_create_from_test(cls, test):
        """Rebuild the wrapper from an existing database Test object."""
        return cls(test.pk, test.full_id)

    def exists(self):
        # file_based is False for reftests, so this assertion always fires:
        # existence probes only make sense for file based test types.
        assert self.file_based, "Should only called for file based tests"

    @classmethod
    def _build_testid(cls, manifest_path, line):
        """Return the (possibly hashed) identifier for one manifest line."""
        if not cls.hash_id:
            return cls._build_full_id(manifest_path, line)
        dir, file = os.path.split(manifest_path)
        return joinposix(dir, "reftest:%s" % hashlib.md5(line).hexdigest())

    @classmethod
    def _build_full_id(cls, manifest_path, line):
        """Return the human readable identifier: directory plus raw line."""
        dir, file = os.path.split(manifest_path)
        return joinposix(dir, "reftest:%s" % line)

    @classmethod
    def _line_to_itest(cls, manifest_path, line, path="", line_no=-1):
        """Build a Reftest from one manifest line, or return None for
        comments, blank lines and lines that do not describe a test
        (e.g. include statements)."""
        if not line or line[0] == "#":
            return None
        line = re.sub("\s+#.*$", "", line)
        # strip leading and trailing whitespace
        line = line.strip()
        if not line:
            return None
        if not cls._parse_line(line, path, line_no):
            return None

        dir, file = os.path.split(manifest_path)
        testid = cls._build_testid(manifest_path, line)
        full_id = cls._build_full_id(manifest_path, line)
        itest = Reftest(testid, full_id)
        # The following attributes are temporary, not persisted in the database
        # object.  They will be used in create_test() for creating the db object.
        itest.line = line
        itest.line_no = line_no
        itest.directory = dir
        return itest

    # More or less direct port of Mozilla reftest.js::ReadManifest() in Python
    @classmethod
    def _parse_line(cls, line, path="", line_no=-1):
        """Parse one already-stripped manifest line.

        Returns a dict describing the test for "load" and "=="/"!=" lines,
        None for ignored lines ("include"), and raises on malformed input.
        `path` and `line_no` are only used in error messages.
        """
        EXPECTED_PASS = 0
        EXPECTED_FAIL = 1
        EXPECTED_RANDOM = 2
        EXPECTED_DEATH = 3  # test must be skipped to avoid e.g. crash/hang
        EXPECTED_LOAD = 4   # test without a reference (just test that it does
                            # not assert, crash, hang, or leak)
        #log.debug("reftest parsing line %s", line)

        items = line.split() # split on whitespace

        expected_status = EXPECTED_PASS

        failure_types = ""
        # Consume the leading fails/random/skip/asserts annotations.
        while re.match("^(fails|random|skip|asserts)", items[0]):
            # XXX this store a failure_types for asserts.
            failure_types += " " + items[0]
            item = items.pop(0)
            stat = ""
            cond = False
            m = re.match("^(fails|random|skip)-if(\(.*\))$", item)
            if m:
                stat = m.group(1)
#                // Note: m[2] contains the parentheses, and we want them.
#                cond = Components.utils.evalInSandbox(m[2], sandbox);
                cond = False # XXX
            elif re.match("^(fails|random|skip)$", item):
                stat = item
                cond = True
            elif re.match("^asserts\((\d+)(-\d+)?\)$", item):
                cond = False
                # XXX asserts are ignored for now
            elif re.match("^asserts-if\((.*?),(\d+)(-\d+)?\)$", item):
                cond = False
                # XXX asserts are ignored for now
            else:
                raise Exception("Error in manifest file %s line %i" %
                                (path, line_no))

#            // XXX expected_status not yet implemented
#            if (!gIsExtractingTests)
#            if (cond) {
#                if (stat == "fails") {
#                    expected_status = EXPECTED_FAIL;
#                } else if (stat == "random") {
#                    expected_status = EXPECTED_RANDOM;
#                } else if (stat == "skip") {
#                    expected_status = EXPECTED_DEATH;
#                }
#            }
#        }
#
        # NOTE(review): this first check pops a plain "HTTP" token, and the
        # "v2" block below then unconditionally resets run_http to False --
        # so a bare "HTTP" annotation is effectively consumed and ignored.
        # Confirm this transitional behavior is intended.
        run_http = items[0] == "HTTP"
        if run_http:
            items.pop(0)

        # TODO: v2
        run_http = False
        http_depth = None
        if items[0] == "HTTP":
            run_http = True
            http_depth = 0
            items.pop(0)
        elif re.match("HTTP\(\.\.(\/\.\.)*\)", items[0]):
            # Accept HTTP(..), HTTP(../..), HTTP(../../..), etc.
            run_http = True
            http_depth = (len(items[0]) - 5) / 3
            items.pop(0)

        # XXX do something with http_depth
        # XXX commented .js code below is not up to date.

        def uri_to_file(uri):
            # Drop any query string or fragment to get the on-disk file.
            return re.sub("[\?#].*$", "", uri)

        if items[0] == "include":
            if len(items) != 2 or run_http:
                raise Exception("Error in manifest file %s line %i" %
                                (path, line_no))
            log.debug("include statement ignored (%s)", line)
            # XXX add an exception for this file that is named reftests.list instead
            #  of reftest.list. Should be Mozilla specific and is not included now.
            if items[1] == "../../content/html/document/reftests/reftests.list":
                return None
            assert items[1].endswith("/reftest.list"), (
                     "Including a reftest manifest file that will not be "
                     "found (%s)") % items[1]
#            var incURI = gIOService.newURI(items[1], null, listURL);
#            secMan.checkLoadURI(aURL, incURI,
#                                CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
#            ReadManifest(incURI);
        elif items[0] == "load":
            if expected_status == EXPECTED_PASS:
                expected_status = EXPECTED_LOAD
            if len(items) != 2 or \
                (expected_status != EXPECTED_LOAD and \
                 expected_status != EXPECTED_DEATH):
                raise Exception("Error in manifest file %s line %i" %
                                (path, line_no))
#            var [testURI] = runHttp
#                            ? ServeFiles(aURL,
#                                         listURL.file.parent, [items[1]])
#                            : [gIOService.newURI(items[1], null, listURL)];
            test_uri = items[1]
#            var prettyPath = runHttp
#                           ? gIOService.newURI(items[1], null, listURL).spec
#                           : testURI.spec;
#            secMan.checkLoadURI(aURL, testURI,
#                                CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
#            gURLs.push( { equal: true /* meaningless */,
#                          expected: expected_status,
#                          prettyPath: prettyPath,
#                          prettyPath2: null,
#                          url: testURI,
#                          url2: null,
#                          failureTypes: failureTypes } );

            return {"equal": True, # meaningless
                    "expected": expected_status,
                    # XXX not yet implemented
                    "run_http": run_http,
                    #"prettyPath": prettyPath,
                    #"prettyPath2": null,
                    "url": test_uri,
                    "url2": "",
                    "file": uri_to_file(test_uri),
                    "file2": "",
                    "failure_types": failure_types}

        elif items[0] == "==" or items[0] == "!=":
            if len(items) != 3:
                raise Exception("Error in manifest file %s line %i" %
                                (path, line_no))
#            var [testURI, refURI] = runHttp
#                                  ? ServeFiles(aURL,
#                                               listURL.file.parent, [items[1], items[2]])
#                                  : [gIOService.newURI(items[1], null, listURL),
#                                     gIOService.newURI(items[2], null, listURL)];
            (test_uri, ref_uri) = (items[1], items[2])
#            var prettyPath = runHttp
#                           ? gIOService.newURI(items[1], null, listURL).spec
#                           : testURI.spec;
#            var prettyPath2 = runHttp
#                           ? gIOService.newURI(items[2], null, listURL).spec
#                           : refURI.spec;
#            secMan.checkLoadURI(aURL, testURI,
#                                CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
#            secMan.checkLoadURI(aURL, refURI,
#                                CI.nsIScriptSecurityManager.DISALLOW_SCRIPT);
#            gURLs.push( { equal: (items[0] == "=="),
#                          expected: expected_status,
#                          prettyPath: prettyPath,
#                          prettyPath2: prettyPath2,
#                          url: testURI,
#                          url2: refURI,
#                          failureTypes: failureTypes } );
            return {"equal": (items[0] == "=="),
                         "expected": expected_status,
                         # XXX not yet implemented
                         "run_http": run_http,
                         #"prettyPath": prettyPath,
                         #"prettyPath2": prettyPath2,
                         "url": test_uri,
                         "url2": ref_uri,
                         "file": uri_to_file(test_uri),
                         "file2": uri_to_file(ref_uri),
                         "failure_types": failure_types }
        else:
            raise Exception("Error parsing manifest file %s line %s: '%s'" %
                            (path, line_no, line))

    @classmethod
    def update_dirty_itests(cls, resource, lines, dirty_itests):
        """Record reftests added/deleted by a diff of `resource`.

        `lines` are raw diff lines (prefixed "+"/"-"/" ").  Returns True
        when `resource` is a reftest manifest and was handled."""
        dir, file = os.path.split(resource)
        if file != "reftest.list":
            return False

        for line in lines:
            if line[0] not in ("-", "+"):
                continue
            itest = cls._line_to_itest(resource, line[1:])
            if not itest:
                continue
            kind = ("added" if line[0] == "+" else "deleted")
            dirty_itests[kind].add(itest)

        # remove intersection, could happen with whitespace changes
        intersection = dirty_itests["added"] & dirty_itests["deleted"]
        dirty_itests["added"] -= intersection
        dirty_itests["deleted"] -= intersection

        return True

    @classmethod
    def get_imported_tests(cls, path):
        """Return the set of reftests declared in the manifest at `path`
        (empty for anything that is not a reftest.list file)."""
        dir, file = os.path.split(path)
        if file != "reftest.list":
            return []

        itests = set()
        with open(join(cls.tests_dir, path)) as f:
            for (line_no, line) in enumerate(f):
                line_no += 1
                itest = cls._line_to_itest(path, line, path, line_no)
                if not itest:
                    continue
                itests.add(itest)
        return itests

    def _compute_flags(self, test):
        """Set reftest-specific flags from the parsed manifest entry and
        the scanned text resources."""
        self._compute_moz_flags(test)

        if test.failure_type:
            self._add_flags(test, "rthasfailuretype")
        if not test.url2:
            self._add_flags(test, "rtloadonly")
        if "reftest-print" in test.resources_text_content:
            self._add_flags(test, "rtprint")
        if "reftest-wait" in test.resources_text_content:
            self._add_flags(test, "rtwait")

    def _is_special_scheme(self, url):
        """URLs that are not served from the tests directory."""
        return url.startswith("data:") or url.startswith("about:")

    def create_test(self):
        """Create and fix up the database Test object for this manifest
        line.  Requires the transient attributes set by _line_to_itest."""
        test = Test()
        test.type = "reftest"
        # This requires that this test was creating using _line_to_itest.
        # That should always the case in the current implementation.
        assert self.line, "Invalid state!"

        # Fixed: self.line_no used to be passed positionally into the `path`
        # parameter of _parse_line, leaving line_no at -1; pass the manifest
        # directory and line number to the right parameters so parse errors
        # are reported correctly.
        reftest = self._parse_line(self.line, self.directory, self.line_no)

        for prop_url in ("url", "url2"):
            u = reftest[prop_url]
            if not u or self._is_special_scheme(u):
                continue
            reftest[prop_url] = self.importer_manager.SERVER_URL + self.directory + "/" + u
        for prop_file in ("file", "file2"):
            f = reftest[prop_file]
            reftest[prop_file] = ""
            if not f or self._is_special_scheme(f):
                continue
            # posix style because it is stored in db
            reftest[prop_file] = self.directory + "/" + f
            assert os.path.isfile(join(self.tests_dir, reftest[prop_file])), \
                    "Reftest manifest in directory '%s' references non existing file %s" % \
                    (self.directory, f)

        test.url = reftest["url"]
        test.url2 = reftest["url2"]
        test.file = reftest["file"]
        test.file2 = reftest["file2"]

        test.equal = reftest["equal"]
        test.expected = reftest["expected"]
        # XXX not yet implemented
        # XXX should it be failure_type*s* ???
        test.failure_type = reftest["failure_types"]

        # XXX detect reftest that require http server
        ##url = self.importer_manager.SERVER_URL + path
        #test.file = path

        test.id = self.testid
        test.full_id = self.full_id

        # XXX should use self.line here instead?
        test.resources = set([self.testid])
        test.test_resources = set()
        test.test_resources.add(test.file if test.file else test.url)
        if test.file2:
            test.test_resources.add(test.file2)
        elif test.url2:
            test.test_resources.add(test.url2)

        self._fixup_test(test)

        return test

class MetadataEntry(object):
    """One metadata entry: a group of test-id regexps together with the
    flags, tags and spec sections they apply."""

    def __init__(self):
        self.regexps = []
        self.flags = set()
        self.tags = set()
        self.sections = set()

        self.regexps_lines = []
        self.regexps_hash = None

        self.lines = []
        self.lines_hash = None

    def add_regexp(self, regexp):
        """Record one regexp line, keeping both the raw text and the
        compiled pattern."""
        self.regexps_lines.append(regexp)
        self.regexps.append(re.compile(regexp))

    def add_line(self, line):
        """Record one attribute line verbatim; hashed in end_parsing()."""
        self.lines.append(line)

    def end_parsing(self):
        """Freeze the entry: hash its regexp and attribute lines so that
        modified entries can be detected across metadata versions."""
        self.regexps_hash = hashlib.md5("".join(self.regexps_lines)).hexdigest()
        # keep self.regexps_lines for __repr__
        self.lines_hash = hashlib.md5("".join(self.lines)).hexdigest()
        del self.lines

    def __repr__(self):
        attributes = {"flags": self.flags,
                      "tags": self.tags,
                      "sections": self.sections}
        return "MetadataEntry<%s, %s>" % (self.regexps_lines, attributes)

class Metadata(object):
    """Parsed metadata file: maps test-id regexps to flags, tags and spec
    sections (as a list of MetadataEntry objects)."""

    # A line whose first non-blank character is '#' is a comment.
    COMMENT_RE = re.compile("\s*#")

    def __init__(self):
        # MetadataEntry objects, in file order.
        self.entries = []

    def _get_sections(self, names):
        """Resolve parsed spec/section references to Section primary keys.

        `names` is a list of dicts with keys 'spec', 'section_num' and
        'section_label' (the groupdicts produced in parse()).  Raises when
        a section cannot be found or a supplied label contradicts the one
        stored in the database.
        """
        sections = set()
        for name in names:
            log.debug("Handling %s", name)
            # If there is no number or label, use the pseudo section "0" to match the whole spec
            section_num = name['section_num']
            if not section_num and not name['section_label']:
                section_num = "0"

            if section_num:
                section = Section.objects.get(spec__id=name['spec'],
                                              number=section_num)
                if name['section_label']:
                    assert section.label == name['section_label'], (
                        "Specified label '%s' for section from spec '%s', "
                        "number '%s' does not match the actual label of '%s'") % (
                        name['section_label'], name['spec'], section_num, section.label)
                section_id = section.pk
            else:
                assert name['section_label']
                section_id = Section.objects.get(spec__id=name['spec'],
                                                 label=name['section_label']).pk
            sections.add(section_id)
        return sections

    def parse(self, path):
        """Parse the metadata file at `path` into self.entries.

        Format: one or more unindented regexp lines start an entry, each
        followed by indented attribute lines ("flags:", "tags:" or
        "specs:").  Comments and blank lines are skipped.
        """
        with open(path) as f:
            cur_entry = None
            # True while consuming a run of consecutive regexp lines; the
            # first regexp after attribute lines starts a new entry.
            in_regexps_lines = False
            for (line_no, line) in enumerate(f):
                if self.COMMENT_RE.match(line):
                    continue
                # Strip trailing comments and whitespace.
                line = re.sub("\s+#.*$", "", line)
                line = line.rstrip()
                if not line:
                    continue
                if not line[0].isspace():
                    # Unindented line: a regexp.
                    if not in_regexps_lines:
                        in_regexps_lines = True
                        # Flush the previous entry before starting a new one.
                        if cur_entry:
                            cur_entry.end_parsing()
                            self.entries.append(cur_entry)
                        cur_entry = MetadataEntry()

                    cur_entry.add_regexp(line)
                    continue
                # Indented line: an "attr: value, value, ..." attribute.
                in_regexps_lines = False
                cur_entry.add_line(line)

                attr, names = line.split(":", 1)
                attr = attr.strip()

                if attr in ("flags", "tags"):
                    names = set(name.strip() for name in names.split(","))
                    setattr(cur_entry, attr, names)
                elif attr == "specs":
                    # Sample line:
                    # specs: html5:1.2, css2.1:1.3.4, html5:1.2:"The test title", css2.1, dom0
                    res = re.finditer("""(?P<spec>[\.\w]+)(?::(?P<section_num>[\d\.]+))?(?::"(?P<section_label>[^"]*)")?""",
                                      names)
                    names = [m.groupdict() for m in res]
                    try:
                        sections = self._get_sections(names)
                    except:
                        # Add context to the error before re-raising.
                        log.error("Error at line: %s when fetching sections: %s", line_no, names)
                        raise
                    cur_entry.sections.update(sections)
                else:
                    assert False, "Unrecognized attribute: '%s', line %s" % (
                                    attr, line_no)
            # Flush the last entry.
            if cur_entry:
                cur_entry.end_parsing()
                self.entries.append(cur_entry)

    def update_test(self, test):
        """Apply flags/tags/sections from every entry whose regexps match
        test.full_id."""
        for entry in self.entries:
            if not any(r.match(test.full_id) for r in entry.regexps):
                continue
            log.debug("got a match for %s", entry)

            test._flags.update(entry.flags)
            test._tags.update(entry.tags)
            test._sections.update(entry.sections)

    def get_modified_regexps(self, old_metadata):
        """Return the set of compiled regexps belonging to entries that
        were added, deleted or modified relative to `old_metadata`.

        Entries are matched across versions by the hash of their regexp
        lines; a matched entry counts as modified when the hash of its
        attribute lines differs."""
        old_hash_to_entry = dict((e.regexps_hash, e) for e in old_metadata.entries)
        old_hashes = set(old_hash_to_entry.iterkeys())
        new_hash_to_entry = dict((e.regexps_hash, e) for e in self.entries)
        new_hashes = set(new_hash_to_entry.iterkeys())

        modified_entries = set()
        # entries that were deleted
        for deleted_hash in old_hashes - new_hashes:
            modified_entries.add(old_hash_to_entry[deleted_hash])

        # new entries
        for new_hash in new_hashes - old_hashes:
            modified_entries.add(new_hash_to_entry[new_hash])

        # now check the modified common entries
        for common_hash in new_hashes & old_hashes:
            if old_hash_to_entry[common_hash].lines_hash != \
               new_hash_to_entry[common_hash].lines_hash:
                modified_entries.add(old_hash_to_entry[common_hash])
                modified_entries.add(new_hash_to_entry[common_hash])

        log.debug("Modified entries: %s", modified_entries)
        regexps = set()
        for e in modified_entries:
            regexps.update(e.regexps)
        return regexps

class ImportState(object):
    """Importer state persisted (pickled) inside the tests directory so
    that subsequent imports can be incremental.

    Attributes listed in PERSISTED_ATTRS are written by dump() and restored
    by _load(); everything else is transient and recomputed.
    """

    # Attributes round-tripped through the pickle file.
    PERSISTED_ATTRS = ("status", "itest_to_res", "orphan_res",
                       "last_import_version", "metadata")

    def __init__(self, tests_dir):
        # persisted attributes
        self.status = "new"
        self.itest_to_res = {}
        self.orphan_res = set()
        self.last_import_version = None
        self.metadata = Metadata()
        # transient attributes
        self.pickle_path = None
        self.all_resources = set()
        self.loaded = False

        self.pickle_path = join(tests_dir, "import_state.pickle")
        self._load()

    def _load(self):
        """Restore persisted attributes from the pickle file, if present.

        Sets self.loaded only when a consistent ("ok") state was read
        (unless IGNORE_BROKEN_IMPORT_STATE is set in the environment), and
        rebuilds the transient all_resources set from the restored data.
        """
        if not os.path.isfile(self.pickle_path):
            return
        # Binary mode: pickle data must not go through newline translation
        # (text mode corrupts pickles on Windows).
        with open(self.pickle_path, "rb") as f:
            obj = cPickle.load(f)
        #log.debug("Read import state: %s", obj)

        assert set(obj) == set(self.PERSISTED_ATTRS), (
                 "Unexpected keys on unpickled object (%s)",
                 set(obj) ^ set(self.PERSISTED_ATTRS))
        if obj["status"] != "ok":
            log.warn("Inconsistent import state (status: %s), ignoring" % obj["status"])
            # IGNORE_BROKEN_IMPORT_STATE forces loading a broken state anyway
            # (debugging aid).
            if not "IGNORE_BROKEN_IMPORT_STATE" in os.environ:
                return
        self.loaded = True
        for attr in self.PERSISTED_ATTRS:
            setattr(self, attr, obj[attr])

        # Rebuild the transient resource set from the persisted mapping.
        # (Bug fix: this used to assign to a misspelled "all_resourcs"
        # attribute and only worked because __init__ pre-created the set.)
        self.all_resources = set()
        for res in self.itest_to_res.values():
            self.all_resources.update(res)
        self.all_resources.update(self.orphan_res)

    def dump(self):
        """Write the persisted attributes to the pickle file."""
        obj = {}
        for attr in self.PERSISTED_ATTRS:
            obj[attr] = getattr(self, attr)

        # Binary mode for the same reason as in _load().
        with open(self.pickle_path, "wb") as f:
            cPickle.dump(obj, f)

    def __repr__(self):
        return super(ImportState, self).__repr__() + " status: %s" % self.status

class ImportException(Exception):
    """Raised when an import cannot proceed (e.g. uncommitted hg changes)."""

class ImporterManager(object):
    """Synchronizes a tests directory with the Test database.

    Either crawls the whole directory, or — when the last import was done
    from the same Mercurial repository — parses "hg diff" output to perform
    an incremental import, classifying each file through the registered
    importers and adding/updating/deleting the corresponding tests.
    """

    SERVER_URL = "http://localhost:8888/"
    # Hidden files/directories, CVS and .svn components anywhere in a path.
    IGNORED_PATHS_RE = re.compile(r"(^|[/\\])(\..*|CVS|\.svn)($|[/\\])")

    def __init__(self, hg_path="hg"):
        # The order of importers is important. In case of ambiguity, the first
        # importer that could locate a test will win.
        self.importers = [Layouttest, Mochitest, Reftest]

        self.hg_path = hg_path
        self.hgid = None

    def _has_hg(self):
        """Return True if the Mercurial executable can be found."""
        if os.path.exists(self.hg_path):
            return True
        return bool(which(self.hg_path))

    def _toposixpath(self, path):
        """
        Converts a path to POSIX style (forward slashes).
        All paths are stored with this style in the database to be OS independent.
        """
        return joinposix(*path.split(os.sep))

    def _is_ignored(self, path):
        """Return True for paths that must never be imported or tracked."""
        IGNORED_RESOURCES = ("metadata.txt", "import_state.pickle")
        if path in IGNORED_RESOURCES:
            return True
        return self.IGNORED_PATHS_RE.search(path) is not None

    def _get_imported_tests_and_resources(self, directory):
        """Walk *directory* and return an (imported_tests, resources) tuple
        of sets.

        Every non-ignored file is a resource; files recognized by one of the
        importers additionally yield imported tests (first importer wins).
        """
        imported_tests = set()
        resources = set()
        for root, dirs, files in os.walk(directory):
            # Prune ignored directories in place so os.walk skips them.
            toremove = []
            for d in dirs:
                if self._is_ignored(d):
                    toremove.append(d)
            for d in toremove:
                dirs.remove(d)

            # XXX not supported now because incremental checking won't read that file
            # XXX TODO: put a toplevel importignore.txt for this maybe
            #ignorefile = os.path.join(root, "importignore.txt")
            #if os.path.exists(ignorefile):
            #    for ignored_dir in open(ignorefile):
            #        ignored_dir = ignored_dir.strip()
            #        if ignored_dir in dirs:
            #            dirs.remove(ignored_dir)

            # Paths are stored relative to tests_dir, POSIX style.
            cur_dir = root[len(self.tests_dir) + 1:]
            cur_dir = self._toposixpath(cur_dir)
            for file in files:
                path = joinposix(cur_dir, file) if cur_dir else file
                if self._is_ignored(path):
                    continue
                resources.add(path)
                for importer in self.importers:
                    itests = importer.get_imported_tests(path)
                    if itests:
                        imported_tests.update(itests)
                        break
        return (imported_tests, resources)

    def _get_hgid(self, use_cached=True):
        """Return the current hg id of tests_dir (cached unless use_cached
        is False).

        Raises ImportException if the working copy has uncommitted changes
        (hg id ending with "+").
        """
        if use_cached and self.hgid:
            return self.hgid
        log.info("Retrieving hgid...")
        hgid_output = subprocess.Popen([self.hg_path, "id"],
                                       cwd=self.tests_dir,
                                       stdout=subprocess.PIPE).communicate()[0]
        log.debug("hg id output %s", hgid_output)
        self.hgid = hgid_output.split()[0]
        if self.hgid.endswith("+"):
            raise ImportException("Uncommitted changes")
        return self.hgid

    def _get_hg_diff(self, import_state):
        """Return the hg diff between the last imported revision and the
        current one, or None when no incremental diff can be computed (the
        previous import was not hg based), forcing a full import.
        """
        assert os.path.isdir(join(self.tests_dir, ".hg")), \
                 "Directory to import is not a mercurial repository"

        current_hgid = self._get_hgid(False)

        log.debug("Current hg id: %s", current_hgid)
        log.info("import_state.last_import_version: %s", import_state.last_import_version)

        if not import_state.last_import_version or \
           not import_state.last_import_version.startswith("hg:"):
            log.info("Last import was not done with hg (or this is the first "
                     "import). Doing a full import (previous import_version: %s)" %
                     import_state.last_import_version)
            if "IMPORTER_ABORT_ON_MISSING_HG" in os.environ:
                # Bug fix: this used to be raise("Missing hg"), which raises
                # a (long-invalid) string instead of an exception instance.
                raise ImportException("Missing hg")
            return None

        last_hgid = import_state.last_import_version[len("hg:"):]
        log.info("Last import hg id: %s", last_hgid)

        diff = subprocess.Popen([self.hg_path, "diff",
                                 "-r", last_hgid,
                                 "-r", current_hgid],
                cwd=self.tests_dir,
                stdout=subprocess.PIPE).communicate()[0]
        #print "hg diff:\n", diff
        return diff

    def _save_hg_import_version(self, import_state):
        """Record the current hg id as the last imported version."""
        hgid = self._get_hgid()
        import_state.last_import_version = "hg:" + hgid

    def _diff_to_dirty_res_and_itests(self, diff, from_hg, all_resources):
        """Parse *diff* and split the touched files into dirty imported
        tests and dirty plain resources.

        Returns (dirty_res, dirty_itests), two dicts keyed by
        "updated"/"added"/"deleted" mapping to sets. Files an importer
        claims go into dirty_itests; the rest are classified as resources
        by comparing their previous presence (all_resources) with the
        filesystem.
        """
        if from_hg:
            assert not "diff --git " in diff, ("Mercurial git style diffs are "
                "not supported, remove git=1 in your .hgrc")
            files = HgDiffParser(diff).parse()
        else:
            files = DiffParser(diff).parse()

        dirty_itests = {"updated": set(), "added": set(), "deleted": set()}
        dirty_res = {"updated": set(), "added": set(), "deleted": set()}

        for file in files:
            orig_file, new_file = (file.origFile, file.newFile)
            if not from_hg:
                # strip leading path component from files
                orig_file, new_file = [f[f.index("/") + 1:] for f in (orig_file, new_file)]

            assert orig_file == new_file
            log.debug("Diff parsing, found file %s", new_file)
            if self._is_ignored(new_file):
                continue

            # Skip the diff header lines before the hunk data.
            skipcount = 3 if from_hg else 2
            lines = file.data.splitlines()[skipcount:]
            for importer in self.importers:
                if importer.update_dirty_itests(new_file, lines, dirty_itests):
                    break
            else:
                existed_before = new_file in all_resources
                exists_now = os.path.isfile(join(self.tests_dir, new_file))
                log.debug("existed before: %s, exists now: %s", existed_before, exists_now)
                kind = ""
                if existed_before and exists_now:
                    kind = "updated"
                elif existed_before and not exists_now:
                    kind = "deleted"
                elif not existed_before and exists_now:
                    kind = "added"
                else:
                    # A file in the diff must have existed before or now.
                    assert False
                dirty_res[kind].add(new_file)

        log.debug("dirty_res %s", dirty_res)
        log.debug("dirty_itests %s", dirty_itests)
        return dirty_res, dirty_itests

    def _compute_dirty_itests(self, import_state, dirty_res, dirty_itests):
        """
        Given previous import_state and the sets of dirty resources and dirty
        import tests, return import tests that were added/updated/deleted.
        """

        # Invert the test -> resources mapping so resource changes can be
        # traced back to the tests that use them.
        resource_to_itests = {}
        for (testid, resources) in import_state.itest_to_res.iteritems():
            for res in resources:
                resource_to_itests.setdefault(res, set()).add(testid)
        log.debug("resource_to_itests: %s", resource_to_itests)

        dirty_itests_unsorted = set()
        for res in itertools.chain(dirty_res["updated"], dirty_res["deleted"]):
            if res in resource_to_itests:
                dirty_itests_unsorted.update(resource_to_itests[res])
        log.debug("dirty_itests_unsorted: %s", dirty_itests_unsorted)

        for itest in dirty_itests_unsorted:
            # file based itests (reftest, layouttest) could be deleted or updated.
            # Reftest can only be updated by a resource change, the add/delete case
            # has been handled in _diff_to_dirty_res_and_itests().
            if (itest in dirty_itests["added"] or
                itest in dirty_itests["deleted"]):
                continue

            kind = "updated"
            if itest.file_based:
                exists = itest.exists()
                if not exists:
                    # Test could be of another type
                    for importer in self.importers:
                        itests = importer.get_imported_tests(itest.path)
                        if itests:
                            assert len(itests) == 1
                            itest = itests[0]
                            exists = True
                            break
                kind = "updated" if exists else "deleted"
            log.debug("Dirty itest %s is kind: %s", itest, kind)
            dirty_itests[kind].add(itest)

        # Newly added resources may themselves be new tests.
        for res in dirty_res["added"]:
            for importer in self.importers:
                itests = importer.get_imported_tests(res)
                if itests:
                    dirty_itests["added"].update(itests)
                    break

        # Look for tests that we're not going to visit which could be affected
        # by metadata change
        unvisited_itests = set(import_state.itest_to_res.iterkeys()) - \
                             dirty_itests["updated"] - dirty_itests["deleted"]

        # XXX or use this to fill the dirty_testids set() instead
        regexps = self.metadata.get_modified_regexps(import_state.metadata)
        log.debug("Modified regexps: %s", regexps)
        for regexp in regexps:
            for itest in unvisited_itests:
                # XXX on what should metadata match ??
                if regexp.match(itest.testid):
                    dirty_itests["updated"].add(itest)
        return dirty_itests

    def _import_dirty_itests(self, dirty_itests, stats, itest_to_resources, incremental):
        """Persist the added/updated/deleted tests to the database.

        Updates *stats* counters and fills *itest_to_resources* with the
        resources of every saved (or unchanged) test.
        """
        stats["updated"] = 0
        stats["added"] = 0
        stats["unupdated"] = 0

        log.debug("import_dirty_testids, dirty_itests: %s", dirty_itests)

        def assert_no_intersection(kind1, kind2):
            # The added/updated/deleted sets must be disjoint by construction.
            intersection = dirty_itests[kind1] & dirty_itests[kind2]
            assert not intersection, "Set %s %s have an intersection: %s" % (
                kind1, kind2, intersection)
        assert_no_intersection("added", "updated")
        assert_no_intersection("added", "deleted")
        assert_no_intersection("deleted", "updated")

        for kind in ("added", "updated"):
            for itest in dirty_itests[kind]:
                log.debug("Handling test: %s", itest)
                test = itest.create_test()

                save = True
                msg = ""
                if kind == "updated":
                    existing_test = itest.get_test()
                    existing_test.load_m2m()

                    if not existing_test.is_equal(test):
                        # The newly created test will have a version of 0,
                        # use the one from the existing test.
                        # (Bug fix: was "+=", which only worked because the
                        # new version is always 0.)
                        test.version = existing_test.version + 1
                        stats["updated"] += 1
                        msg = "Updated"
                    else:
                        save = False
                        stats["unupdated"] += 1
                        log.debug("Test (%s) didn't change, not saving" % itest.testid)
                        # test.sections could change without incrementing the test
                        # version, update the field manually
                        test.save_m2m_attr("sections", Section)
                else:
                    stats["added"] += 1
                    msg = "Added"

                if save:
                    test.save()
                    log.log(logging.INFO if incremental else logging.DEBUG,
                            "%s test %s", msg, itest)
                itest_to_resources[itest] = test.resources

        for itest in dirty_itests["deleted"]:
            # Deleted tests are only flagged, never removed from the db.
            deleted_test = itest.get_test()
            deleted_test.deleted = True
            deleted_test.save()
            log.info("Deleted test %s", itest)
            # XXX what's the policy for really deleting from the db?

    def _do_import_tests(self, tests_dir, from_hg=True, diff=None,
                         verify_incremental=False):
        """Perform the actual import of *tests_dir*.

        Chooses between a full crawl and an incremental (diff based) import,
        updates the database through _import_dirty_itests() and persists the
        resulting ImportState. With verify_incremental, cross-checks the
        incrementally maintained resource set against a full crawl.
        """
        self.tests_dir = tests_dir.rstrip("\\/")
        log.info("Importing tests directory %s", self.tests_dir)

        ImportedTest.tests_dir = self.tests_dir
        ImportedTest.importer_manager = self

        import_state = ImportState(self.tests_dir)
        log.debug("import state %s", import_state)

        self.metadata = Metadata()
        metadata_path = join(self.tests_dir, "metadata.txt")
        if os.path.isfile(metadata_path):
            self.metadata.parse(metadata_path)

        if from_hg and not self._has_hg():
            log.warn("Mercurial command not found (looked for '%s'), "
                     "import won't be incremental." % self.hg_path)
            from_hg = False
        if from_hg and os.path.isdir(join(self.tests_dir, ".hg")):
            assert not diff, "No diff should be given if using from_hg=True"
            diff = self._get_hg_diff(import_state)

        # Incremental only makes sense with existing tests, a diff to apply
        # and a consistent previous state.
        test_count = Test.objects.count()
        incremental = test_count > 0 and diff is not None and import_state.loaded
        log.debug("Type of import: %s", "incremental" if incremental else "full")

        all_resources = set()
        stats = {"total": 0, "updated": 0, "added": 0, "deleted": 0}

        # From now on, the ImportState and/or database can be modified. The
        # import_state status is set to "pending" so that if something goes
        # wrong, a full import will be done on the next import.
        import_state.status = "pending"
        import_state.dump()

        if incremental:
            all_resources = import_state.all_resources

            dirty_res, dirty_itests = self._diff_to_dirty_res_and_itests(diff,
                                        from_hg, all_resources)
            log.debug("dirty_res: %s", dirty_res)
            log.debug("dirty_itests %s", dirty_itests)

            dirty_itests = self._compute_dirty_itests(import_state, dirty_res, dirty_itests)
            log.debug("dirty_itests %s", dirty_itests)

            # Update all_resources
            all_resources.difference_update(dirty_res["deleted"])
            all_resources.update(dirty_res["added"])

            if verify_incremental:
                # XXX kind of sucks to have to filter that.
                all_res = set(r for r in all_resources if not urlparse.urlparse(r).scheme in ("about", "data"))
                # remove .svn directories
                all_res = set(r for r in all_res if not ".svn" in r)

                res_filesystem = self._get_imported_tests_and_resources(self.tests_dir)[1]

                # XXX remove reftests for now
                all_res = set(r for r in all_res if not "reftest:" in r and not "reftest.list" in r)
                res_filesystem = set(r for r in res_filesystem if not "reftest:" in r and not "reftest.list" in r)

                assert all_res == res_filesystem, ("Incremental resources " + \
                        "does not match actual resources.\n\n  From incremental: %s\n\n" + \
                        "  Found on filesystem: %s\n\n  Incr not in fs: %s\n\n  Fs not in incr: %s") % \
                        (all_res, res_filesystem, all_res - res_filesystem, res_filesystem - all_res)

            itest_to_resources = import_state.itest_to_res

        else:
            log.info("Crawling tests directory looking for tests...")
            fromfs_itests, all_resources = \
                self._get_imported_tests_and_resources(self.tests_dir)

            fromdb_itests = set(ImportedTest.create_from_test(test) for test in Test.objects.filter(deleted=False))
            dirty_itests = {"updated": set(), "added": set(), "deleted": set()}

            for itest in fromfs_itests:
                kind = "updated" if itest in fromdb_itests else "added"
                dirty_itests[kind].add(itest)
            dirty_itests["deleted"] = fromdb_itests - dirty_itests["updated"]

            stats["total"] = len(fromfs_itests)
            itest_to_resources = {}

        log.info("Importing tests...")
        self._import_dirty_itests(dirty_itests, stats, itest_to_resources, incremental)

        for itest in dirty_itests["deleted"]:
            if itest in itest_to_resources:
                del itest_to_resources[itest]

        # Resources referenced by no test are tracked as orphans.
        used_by_tests_resources = set()
        for itest, resources in itest_to_resources.iteritems():
            used_by_tests_resources.update(resources)
        orphan_resources = all_resources - used_by_tests_resources
        log.debug("Orphan resources: %s", orphan_resources)

        log.info("************* Import statistics *************")
        log.info("  total: %(total)s  added: %(added)s  updated: %(updated)s"
                 "  unupdated: %(unupdated)s  deleted: %(deleted)s" % stats)

        import_state.itest_to_res = itest_to_resources
        log.debug("Saving import_state.itest_to_res %s", import_state.itest_to_res)
        if self._has_hg() and os.path.isdir(join(self.tests_dir, ".hg")):
            self._save_hg_import_version(import_state)
        import_state.orphan_res = orphan_resources
        import_state.status = "ok"
        import_state.metadata = self.metadata
        import_state.dump()

    def import_tests(self, tests_dir, from_hg=True, diff=None,
                     verify_incremental=False):
        """Public entry point: run _do_import_tests() guarded by the global
        Status generation counter, which doubles as a busy flag.

        Exits the process (code 3) if another import appears to be running.
        """
        import_start_time = time.time()
        status = Status.objects.get(pk=1)
        if status.generation == Status.BUSY:
            log.warn("Status in busy state, another import in progress?")
            sys.exit(3)
        last_generation = status.generation
        log.debug("last_generation: %s", last_generation)
        status.generation = Status.BUSY
        status.save()
        try:
            self._do_import_tests(tests_dir, from_hg, diff, verify_incremental)
        finally:
            # Always restore/advance the generation, even on failure, so the
            # busy flag does not stay stuck.
            status.generation = last_generation + 1
            status.save()
            log.debug("new generation: %s", status.generation)
            log.info("Import duration: %s", time.time() - import_start_time)

def main():
    usage = "%prog [OPTIONS] CONFIG_FILE"
    parser = OptionParser(usage=usage)
    parser.add_option('--show-import-state', action="store_true",
                      default=False, help='Dump import state data')
    parser.add_option('--no-hg', action="store_true",
                      default=False, help='Do not user Mercurial')
    parser.add_option('--hg-path', default='hg', help='Mercurial path')
    parser = cli_wrapper.add_common_parser_options(parser)
    options, args = parser.parse_args()

    if len(args) != 0:
        parser.print_help()
        sys.exit(2)

    cli_wrapper.process_options(options)

    # TODO: add an option to force a full import

    if options.unit:
        from browsertests.importer.test_importer import run_tests
        run_tests(options)
        sys.exit(0)

    if options.show_import_state:
        import_state = ImportState(options.tests_path)
        log.info("ImportState: %s", import_state)
        log.info("  last_import_version: %s", import_state.last_import_version)
        import pprint
        if False:
            print "-" * 80
            print "import_state.itest_to_res:"
            pprint.pprint(import_state.itest_to_res, indent=2)
            print "-" * 80
            print "import_state.orphan_res:"
            for r in sorted(import_state.orphan_res):
                print "     ", r
        if False:
            pprint.pprint(import_state.orphan_res, indent=2, width=80)
        sys.exit(0)

    importer_manager = ImporterManager(options.hg_path)
    importer_manager.import_tests(options.tests_path, from_hg=(not options.no_hg))

# Script entry point when run directly (e.g. "python importer.py ...").
if __name__ == "__main__":
    main()
