#!/usr/bin/python
"""Locates identical bugs from different bugzillas"""

import xml.etree.cElementTree as ET
import sys
from glob import glob
import traceback
import re

# Optional runtime optimization: psyco is a Python 2 JIT compiler.
# Catch only ImportError so real failures inside psyco still surface.
try:
    import psyco
    psyco.full()
except ImportError:
    sys.stdout.write("No runtime optimization.\n")

# URL patterns used to spot references to other bug trackers inside comment
# text.  Group 1 captures the tracker base URL, group 2 the bug id.
# Raw strings and escaped metacharacters: the original "show_bug.cgi?" made
# the "i" optional and let "." match any character; "http[s]*" also
# accepted "httpss".
bugzilla_r = re.compile(r"(https?://[a-z0-9-_.]*/)show_bug\.cgi\?.*id=(\d+)")
launchpad_r = re.compile(r"(https?://bugs\.launchpad\.net/[a-zA-Z0-9-+_/.]*)/(\d+)")

# from http://boodebr.org/main/python/all-about-python-and-unicode#UNI_XML
# Pattern matching characters that are illegal in XML 1.0: C0 control
# characters (minus tab/newline/CR), the non-characters U+FFFE/U+FFFF,
# and UTF-16 surrogate code points that are unpaired (a lone high
# surrogate, a lone low surrogate, or one at the start/end of the data).
# NOTE(review): unichr() is Python 2 only; this block needs porting
# before the script can run on Python 3.
RE_XML_ILLEGAL = u'([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])' + \
        u'|' + \
        u'([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])' % \
        (unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
                unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff),
                unichr(0xd800),unichr(0xdbff),unichr(0xdc00),unichr(0xdfff))
# Compiled once at import time; used below to strip bad characters from
# XML data that the parser rejects.
regex = re.compile(RE_XML_ILLEGAL)

def add_to_list(bugs, curbug, newbug):
    """Record newbug in the list kept under curbug, creating it on first use."""
    bugs.setdefault(curbug, []).append(newbug)

def permutate(l):
    '''Return every ordered pair (a, b) of values from l where a != b.

    Note: pairs are skipped on value inequality, so equal duplicates in
    l never pair with each other.  Quadratic in len(l).
    '''
    return [(a, b) for a in l for b in l if a != b]

def parse_bugs(file, bugs=None, titles=None, duplicates=None):
    """Parse one bugzilla XML export and collect bug relations.

    file       -- path of the XML file to read
    bugs       -- dict: bug URL -> list of bug URLs referenced from its
                  comments (created fresh when not given)
    titles     -- dict: short description -> list of bug URLs sharing it
                  (created fresh when not given)
    duplicates -- dict: bug URL -> list of bug URLs it duplicates
                  (created fresh when not given)

    The dicts are mutated in place, so passing the same dicts across
    calls accumulates results from several files.
    """
    # Avoid the mutable-default-argument trap: the original `bugs={}`
    # defaults were shared between calls.
    if bugs is None:
        bugs = {}
    if titles is None:
        titles = {}
    if duplicates is None:
        duplicates = {}

    # Read the whole file and make sure the handle is closed.
    fp = open(file)
    try:
        data = fp.read()
    finally:
        fp.close()

    try:
        tree = ET.fromstring(data)
    except SyntaxError:
        # looks like we found an invalid character
        # (ET.ParseError subclasses SyntaxError, so this also works on
        # newer ElementTree versions)
        sys.stderr.write("Invalid characters found, stripping.. ")
        # this is slow, but it is the best we can do right now..
        for match in regex.finditer(data):
            data = data[:match.start()] + "?" + data[match.end():]
        tree = ET.fromstring(data)

    # Bug URLs are built from the export's base URL plus the bug id.
    baseurl = tree.get("urlbase")
    if not baseurl.endswith("/"):
        baseurl = baseurl + "/"
    for bug in tree:
        # Skip entries the exporter flagged as invalid/inaccessible.
        if bug.get("error"):
            continue
        bug_id = bug.findtext("./bug_id")
        short_desc = bug.findtext("./short_desc", "Bug %s (no short description)" % bug_id)
        curbug = "%s%s" % (baseurl, bug_id)
        # A dup_id element marks this bug as a duplicate of another one.
        dup_id = bug.findtext("./dup_id")
        if dup_id:
            add_to_list(duplicates, curbug, "%s%s" % (baseurl, dup_id))
        add_to_list(titles, short_desc, curbug)
        # Scan every comment for URLs pointing at known bug trackers.
        for comment in bug.findall("./long_desc/thetext"):
            text = comment.text
            if not text:
                continue
            for tracker in [bugzilla_r, launchpad_r]:
                for base, id in tracker.findall(text):
                    add_to_list(bugs, curbug, "%s/%s" % (base, id))

if __name__ == "__main__":
    if len(sys.argv) < 4:
        print "Usage: %s <bugs dir> <output file> <format>" % sys.argv[0]
        print "Format can be: turtle or plain"
        sys.exit(1)
    bugsdir = sys.argv[1]
    output = open(sys.argv[2], "w")
    format = sys.argv[3]

    templates = {}
    if format == "turtle":
        TEMPLATE_DIR="./templates"
        for t in ["header", "rel_duplicates", "rel_related"]:
            templates[t] = open("%s/%s" % (TEMPLATE_DIR, t)).read()
    else:
        templates["header"] = ""
        templates["rel_duplicates"] = "%(curbug)s = %(newbug)s"
        templates["rel_related"] = "%(curbug)s ? %(newbug)s"
    
    bugs = {}
    titles = {}
    duplicates = {}

    # looking for files
    files = glob("%s/*xml" % bugsdir)
    for file in files:
        # are we doing one file at a time?
        print >>sys.stderr, "Parsing %s.." % file,
        try:
            parse_bugs(file, bugs=bugs, titles=titles, duplicates=duplicates)
            print >>sys.stderr, "ok"
#            os.unlink(file)
        except:
            traceback.print_exc()
            print >>sys.stderr, "error parsing %s: %s" % (file, sys.exc_value)
            sys.exit(1)
    print >>output, templates["header"]
    # duplicate bugs
    print >>output, "# duplicated bugs"
    for bug in duplicates:
        dups = duplicates[bug]
        dups.append(bug)
        for curbug, newbug in permutate(dups):
            print >>output, templates["rel_duplicates"] %  { "curbug": curbug, "newbug": newbug }
    # related bugs
    print >>output, "# bug references"
    for bug in bugs:
        dups = bugs[bug]
        dups.append(bug)
        for curbug, newbug in permutate(dups):
            print >>output, templates["rel_related"] % { "curbug": curbug, "newbug": newbug }
#        print >>output, "%s: %s" % (bug, ", ".join(bugs[bug]))
    # repeatable short_descs
    # we don't need this anymore, optimized virtuoso is fast enough
    # UPDATE: well, for plain text queries they are quite useful
    repeatable = [x for x in titles if len(titles[x]) > 1]
    print >>output, "# short description perfect match"
    for bug in repeatable:
        curtitles = titles[bug]
        for curbug, newbug in permutate(titles[bug]):
            print >>output, templates["rel_related"] % { "curbug": curbug, "newbug": newbug }
    output.close()
#    print files
