
"""
Generates a table of lag time versus ref counts.

That is, it goes through all the references for which a non-zero time
has been found within the text of the fetched contents.

The extract_datetime.py script assumes that the first parsable date in
the visible HTML text is the publication time.  This appears to be
true about 70% of the time -- from visually inspecting a sample of 20
examples.

This script takes that supposed publication datetime and subtracts it
from the earliest revision to each Wikipedia article in which this
page is cited.  This generates a time lag.  Each (ref, article) pair
is considered a separate measurement.  

Negative time lags are simply ignored and not included in the
counting.  These result from incorrectly extracted publication
datetimes.

The resulting data is stored in lag_histogram.json, which is then
further munged by hand to produce the plot shown in
lag_histogram.xlsx
"""
# $Id: lag_histogram.py 34 2011-08-04 02:20:49Z postshift@gmail.com $

import json
import time
import datetime
import wikinbitDb

# Histogram accumulator: lag in whole days -> count of (ref, article) pairs.
lagCounts = {}
cur1 = wikinbitDb.conn.cursor()
cur1.execute("""select refUrl, foundTime from refs_times where foundTime > 0""")
cur2 = wikinbitDb.conn.cursor()
for refUrl, foundTime in cur1:
    cur2.execute("select distinct(urlName) from revisions where refUrl = %s", (refUrl,))
    urlNames = cur2.fetchall()
    if not urlNames:
        print("How did we get no urlNames for %s" % refUrl)
        continue
    # BUG FIX: the original looped over urlNames[0] -- the columns of the
    # *first* row -- so only one citing article per refUrl was ever counted.
    # fetchall() returns a list of 1-tuples; unpack each row instead.
    for (urlName,) in urlNames:
        cur2.execute("select min(revisionTime) from revisions where urlName = %s and refUrl = %s", (urlName, refUrl))
        earliestRevisionTime = cur2.fetchall()[0][0]

        # Lag between the page's extracted publication time and the first
        # Wikipedia revision that cites it (both appear to be Unix epoch
        # seconds -- TODO confirm against the schema).
        elapsed = earliestRevisionTime - foundTime
        if elapsed < 0:
            # Negative lags come from incorrectly extracted publication
            # datetimes; per the module docstring, they are simply skipped.
            continue

        elapsed_struct = datetime.timedelta(seconds=elapsed)
        print("%s cited %s\n\tcited:    %s\n\tpub date: %s\n\t%d seconds (%s) lag time" % (
            urlName, refUrl,
            time.asctime(time.gmtime(earliestRevisionTime)),
            time.asctime(time.gmtime(foundTime)),
            elapsed,
            elapsed_struct
            ))

        # Bucket by whole days of lag.
        lagCounts[elapsed_struct.days] = lagCounts.get(elapsed_struct.days, 0) + 1


# Persist the histogram; "with" guarantees the handle is closed.
with open("lag_histogram.json", "w") as fh:
    json.dump(lagCounts, fh)
print("Done!")
