"""
Reads a list of page names from wikipedia, gets their revision
history, and searches the revision history for <ref> tags with URLs,
which it stores in a postgres DB.
"""
# $Id: when_linked.py 30 2011-08-03 01:12:18Z postshift@gmail.com $
import calendar
import difflib
import re
import sys
import time
import traceback
import urlparse

import wikipedia as pywikibot

# It appears that none of the pywikipedia stuff is packaged, so we'll
# just keep hacking:
sys.path.append("..")
import wikinbitDb

from optparse import OptionParser
parser = OptionParser(usage="")
parser.add_option("--num_names", dest="desired_num_names", type=int,
                  help="number of articles to fetch revision histories for")
(options, args) = parser.parse_args()
# In Python 2, `0 < None` is False, so a missing --num_names used to make
# the main loop below silently run zero iterations.  Fail loudly instead.
if options.desired_num_names is None:
    parser.error("--num_names is required")

num_names = 0
while num_names < options.desired_num_names:
    cur = wikinbitDb.conn.cursor()
    try:
        cur.execute("select urlName from articles where revisionsFetched is FALSE limit 1")
        urlName = cur.fetchall()
        if len(urlName) == 0:
            sys.exit("Reached the end!")
        else:
            urlName = urlName[0][0]
        assert urlName
    except Exception, exc:
        sys.exit(traceback.format_exc(exc))        

    try:
        page = pywikibot.Page(pywikibot.getSite(), urlName)
        history = page.getVersionHistory(
            forceReload=True, 
            getAll=True, 
            reverseOrder=True)
    except Exception, exc:
        print "Failed to load revisions for: %s\n\n%s\n" % (urlName, traceback.format_exc(exc))
        # give up on fetching this page's revisions...
        cur.execute("update articles set revisionsFetched=True where urlName = %s", (urlName,))
        continue

    # This is a bit of a misnomber, because the items put into this
    # array are per-link, and a single revision can have many lines
    # each of which might have more than one link.
    revisions = []

    # content before the first revision is empty string:
    prev = ""
    # iterate through all the revisions, diffing against previous
    for (revisionId, editTimestamp, username, editSummary, 
         editSize, editTags, content) in history:
        #print content.encode('utf8')
        # wow this is an odd way to do timestamps in pywikibot...
        #revisionTime = pywikibot.parsetime2stamp(editTimestamp)

        revisionTime = time.strptime(editTimestamp, "%Y-%m-%dT%H:%M:%SZ")
        revisionTime = int(time.strftime("%s", revisionTime))

        # start lineNumbers at 0; increment at beginning of for loop
        lineNumber = -1  
        for line in difflib.ndiff(prev.splitlines(), content.splitlines()):
            # keep track of lineNumber generated by interaction
            # between difflib.ndiff and splitlines to use as part of
            # unique key (urlName, revisionId, lineNumber, linkNumber)
            lineNumber += 1
            if line[0] == "+":
                # plus symbol from difflib means modified line.  Look
                # for URLs that are http(s) or ftp(s), and note that
                # wikipedia uses several termiantor symbols to
                # separate metadata fields in citation <ref> tags.
                
                # the list of possible terminators in wikipedia text
                # appears to be:
                terminators = '''\s|"}\],<'''

                linkNumber = 0
                while len(line) > 0:
                    m = re.search(u'''((http|ftp).*?)([%s]|$)(.*)''' % terminators, line)
                    if not m: 
                        # did not find any (more) URLs in the line
                        break
                    line = m.group(4)  # continue deepr into the line
                    try:
                        refUrl = m.group(1)
                        parsedRefUrl = urlparse.urlsplit(refUrl)
                        refUrl = parsedRefUrl.geturl()
                        refUrl = refUrl.decode("utf8").encode("utf8")
                        if len(refUrl) == 0:
                            refUrl = None
                    except Exception, exc:
                        print traceback.format_exc(exc)
                        print "failed on: " + repr(refUrl)
                        refUrl = None
                    if not parsedRefUrl.scheme in ["http", "https", "ftp", "ftps"]:
                        print "rejecting scheme: " + repr(parsedRefUrl.scheme)
                        refUrl = None
                    # prepare to put it in the DB
                    revisions.append({
                            'urlName':         urlName,
                            'revisionTime':    int(revisionTime),
                            'revisionId':      int(revisionId),
                            'lineNumber':      int(lineNumber),
                            'linkNumber':      linkNumber,
                            'refUrl':          refUrl
                            })
                    # In case we get multiple links within this same
                    # modified line, we increment linkNumber:
                    linkNumber += 1
        # prepare to compare with next revision
        prev = content

    # put it all in the DB
    cur = wikinbitDb.conn.cursor()
    try:
        cur.executemany("""INSERT INTO revisions(urlName, revisionId, revisionTime, lineNumber, refUrl) VALUES (%(urlName)s, %(revisionId)s, %(revisionTime)s, %(lineNumber)s, %(refUrl)s)""", revisions)
    except Exception, exc:
        print traceback.format_exc(exc)
        print repr(revisions)
        for r in revisions:
            print r["urlName"], repr(r["refUrl"])
        sys.exit("ERROR.")
    cur.execute("UPDATE articles SET revisionsFetched=TRUE where urlName=%s", (urlName,))

    wikinbitDb.conn.commit()
    num_names += 1
    print "Inserted %d revisions for %s" % (len(revisions), urlName)
    sys.stdout.flush()

    if num_names % 10 == 0:
        wikinbitDb.reset()
        cur = wikinbitDb.conn.cursor()

print "Done fetching revisions for %d articles" % num_names
wikinbitDb.conn.close()
