from types import *
from util import *
from data import *
from datetime import timedelta, date

# When True, re-query Google Scholar for papers previously marked "notFound".
searchNotFoundPapers = False
# Minimum age of a stored citation count before it is refreshed (see updateCitations()).
updateDelta=timedelta(days = 60)
# An event's citations count toward conference ranking only while the event's
# age (in days) lies between these two bounds (see statistics()).
daysStartToCountImpact = 365
daysEndToCountImpact = 365*6


def createPattern(paperName):
    """Compile a regex that locates *paperName* at the start of a line in a
    html2text-rendered Google Scholar result page.

    Word separators are relaxed to any run of non-alphanumeric characters,
    an optional "[CITATION]"/"[PDF]"/"[BOOK]" tag prefix is tolerated, and
    titles truncated with a leading ellipsis are matched through extra
    alternative sub-patterns.
    """
    import re
    words = re.split("\W+", paperName[:80])
    separator = r'([^a-zA-Z0-9])*'
    # The article "a" is sometimes dropped in listings, so make it optional.
    tail = [r"a?" if w == "a" else w for w in words[1:]]
    patternString = "^(\[(CITATION|PDF|BOOK)\])?[^a-zA-Z0-9]*" + words[0]
    for word in tail:
        patternString = patternString + separator + word
    #patternString = "^" + re.sub(r'([^a-zA-Z0-9])+', r'([^a-zA-Z0-9])*', paperName[:80])
    pieces = patternString.split("([")
    if len(pieces) > 3:
        # Scholar may truncate a long title and show it with a leading
        # ellipsis; also accept matches starting one, two or three
        # separator groups into the pattern.
        alternatives = [u'^\u2026([' + "([".join(pieces[k:]) for k in (1, 2, 3)]
        patternString = "(%s)|(%s)|(%s)|(%s)" % (
            patternString, alternatives[0], alternatives[1], alternatives[2])

    return re.compile(patternString, re.MULTILINE | re.IGNORECASE)


def getURL(url):
    """Fetch *url* and return the raw response body as a string.

    A browser-like User-agent header replaces urllib's default one, since
    Google Scholar rejects the stock Python agent string.
    """
    import urllib
    agent = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.10) Gecko/20050716 Firefox/1.0.6"
    opener = urllib.FancyURLopener()
    # Drop urllib's default User-agent header before installing ours.
    opener.addheaders.pop(0)
    opener.addheader("User-agent", agent)
    f = opener.open(url)
    try:
        # FIX: close the connection even when read() raises, so a failed
        # transfer does not leak the socket (the original only closed on
        # the success path).
        return f.read()
    finally:
        f.close()

def getPaperSearchPage(name, author):
    """Return the Google Scholar HTML result page for a search combining the
    paper *name* and *author* string (both may be unicode)."""
    import urllib
    query = {
        "q": (name + u" " + author).encode('utf-8'),
        'hl': "en",
        'lr': "",
        "btnG": "Search",
    }
    return getURL("http://scholar.google.com/scholar?%s" % urllib.urlencode(query))


# Returns the string "notFound" when the paper does not appear in the page,
# 0 when it appears without a "Cited by" link, otherwise the citation count.
def findCitationsInPage(paperName, searchHtml):
    """Extract the citation count for *paperName* from a Scholar result page."""
    import re
    import html2text
    text = html2text.html2text(searchHtml)
    #saveToFile(text.encode("utf-8"), "temp.txt")

    # Locate the paper's entry; give up if the title is absent.
    titleMatch = createPattern(paperName).search(text)
    if titleMatch is None:
        return "notFound"
    text = text[titleMatch.end():]

    # Cut everything from the next "Web Search" link onward, so we do not
    # pick up the "Cited by" figure of an unrelated following entry.
    trailer = re.search("Web Search", text, re.IGNORECASE)
    if trailer is not None:
        text = text[:trailer.start()]

    citedBy = re.search("Cited by (?P<times>[0-9]+)", text, re.IGNORECASE)
    if citedBy is None:
        return 0
    return int(citedBy.group('times'))

def getCitationsFromGoogle(paperName, author):
    """Query Google Scholar for the paper and return its citation count
    (or "notFound" when the paper cannot be located in the results)."""
    page = getPaperSearchPage(paperName, author)
    return findCitationsInPage(paperName, page)

def getPaperList(dblpPage):
    """Parse a DBLP table-of-contents HTML page into a Papers collection.

    Authors, title and page range are recovered from the html2text
    rendering of the page; the event date, if found anywhere on the page,
    is stored on the returned collection as ``result.date``.
    """
    import html2text
    import re
    dblpPage = html2text.html2text(dblpPage)
    # A paper entry starts as "*Author1, Author2, ...:" in the text rendering.
    authorPattern = r"\*[^\:\n]+\:"
    authorResult = re.finditer(authorPattern, dblpPage)
    result = Papers()
    for r in authorResult:
        # The title runs from the end of the author list up to the
        # sentence-ending punctuation followed by "start[-end]" page numbers.
        pagePattern = r"[\.\!\?] (?P<start>\d+)(-(?P<end>\d+))?"
        pageResult = re.search(pagePattern, dblpPage[r.end():])
        if pageResult:
            end = pageResult.start() + r.end()
        else:
            end = len(dblpPage)
        title = dblpPage[r.end():end].strip()
        # Strip the leading "*" and trailing ":" before splitting the authors.
        authors = dblpPage[r.start()+1:r.end()-1].split(",")
        authors = map (lambda x: x.strip(), authors)
        paper = Paper(title, authors)
        # NOTE(review): pageResult may be None here (no page fragment found),
        # which would raise AttributeError on the next line — confirm that
        # every DBLP entry carries page numbers.
        paper.startPage = int(pageResult.group('start'))
        paper.endPage = pageResult.group('end')
        if paper.endPage == None: 
            # Single-page entry: "123" with no "-end" part.
            paper.endPage = paper.startPage
        else:
            paper.endPage = int(paper.endPage)
        result.append(paper)
    
    # Find the event date, e.g. "June 10-14, 2008" or "10-14 June 2008".
    # Month/day groups carry distinct numeric suffixes because a named group
    # may appear only once per pattern; branches are tried in order 4..1.
    basic_monthPattern = r"January|February|March|April|May|June|July|August|September|October|November|December"
    monthPattern = lambda i:"(?P<month" + str(i) + ">" + basic_monthPattern + ")"
    basic_dayPattern = r"[0-3]?\d"
    dayPattern = lambda i:"(?P<day" + str(i) + ">" + basic_dayPattern + ")"
    yearPattern = r"(?P<year>\d\d\d\d)"
    datePattern = "((" + monthPattern(4) + " " + dayPattern(4) + "(-(" + monthPattern(3) + " )?" + dayPattern(3) + ")?)|" + \
        "(" + dayPattern(2) + "( " + monthPattern(2) + ")?(-" + dayPattern(1) + " " + monthPattern(1) + ")?))" + \
        " ?" + ",?" + " ?" + yearPattern
    dateResult = re.search(datePattern, dblpPage)
    if dateResult != None:
        print dblpPage[dateResult.start():dateResult.end()]
        year = int(dateResult.group("year"))
        month = None
        day = None
        # Use the first month/day group that matched, in suffix order 1..4.
        for i in range(1,5):
            if dateResult.group("month" + str(i)) != None:
                month = month2Int(dateResult.group("month" + str(i)))
                break
        for i in range(1,5):
            if dateResult.group("day" + str(i)) != None:
                day = int(dateResult.group("day" + str(i)))
                break
        from datetime import date
        # NOTE(review): if a branch matched without a month group, month
        # stays None and date() raises — verify the alternation guarantees
        # a month on every match.
        result.date = date(year, month, day)
    return result
    

def getCitationsForPaperList(papers, notUpdateDelta=timedelta.max, searchNotFound=False):
    """Fill in or refresh ``paper.citations`` from Google Scholar.

    Papers whose stored count is younger than *notUpdateDelta* are skipped;
    papers previously marked "notFound" are retried only when
    *searchNotFound* is true.  Mutates and returns *papers*.
    """
    from datetime import date
    for paper in papers:
        # Skip papers with a sufficiently fresh citation record.
        if hasattr(paper, "citations") and hasattr(paper, "updateDate"):
            if date.today() - paper.updateDate < notUpdateDelta:
                if (not searchNotFound) or (not paper.citations == "notFound"):
#                    print "passed paper \"%s\"..." % paper.title[:40].encode('utf-8')
                    continue
        try:
            print "getting paper \"%s\"..." % paper.title[:40].encode('utf-8')
            # Build the query author string from the last whitespace-separated
            # token (presumably the family name) of each author.
            authorString = ""
            for author in paper.authors:
                import re
                authorString += re.findall('\S+', author)[-1] + ' '
            paper.citations = getCitationsFromGoogle(paper.title, authorString[:-1])
            paper.updateDate = date.today()
            if (paper.citations == "notFound"):
                print "not found"
            else:
                print "cited %s times" % paper.citations
            # Randomized pause in [5, ~605] seconds, heavily biased toward
            # short waits, so Google does not block us for hammering it.
            import random
            randomNumber = random.random()
            seconds = randomNumber * (600) * randomNumber * randomNumber * randomNumber* randomNumber* randomNumber + 5
            print "sleeping %d seconds to avoid flush Google" % seconds
            import time
            time.sleep(seconds)
        except IOError, (errno, strerror):
            # Network failure: report it, leave this paper as-is, move on.
            print "failed. Reason:"
            print "I/O error(%s): %s" % (errno, strerror)
    return papers      

def filterPaper(paperList, minPageNumber, maxPageNumber):
    def pageInRange(paper):
        page = paper.endPage - paper.startPage + 1
        result = paper.startPage != None and paper.endPage != None and page >= minPageNumber and page <= maxPageNumber
        if not result:
            print "filtered paper \"%s\". Page: %i " % (paper.title[:80], page)
        return result
    result = filter(pageInRange, paperList)
    result = Papers(result, paperList.date)
    return result
        

def urlToXML(url, fileName, minPageNumber, maxPageNumber):
    """Fetch a DBLP page from *url*, optionally filter its papers by page
    count, and save the resulting paper list as XML to *fileName*."""
    print "fetching %s" % url
    paperList = getPaperList(getURL(url))
    if maxPageNumber != None:
        # Default the lower bound to one page less than the upper bound.
        if minPageNumber == None: minPageNumber = maxPageNumber - 1
        paperList = filterPaper(paperList, minPageNumber, maxPageNumber)
    print "get %i papers" % len(paperList)
    xml = paperListToXML(paperList)
    print "saving to %s" % fileName
    saveToFile(xml, fileName)
    print "ok"

def getCitationForFile(oldFileName, newFileName, notUpdateDelta=timedelta.max, searchNotFound=False):
    """Load a paper list from *oldFileName*, update its citation counts, and
    write the result to *newFileName*.

    The list is saved even when the update is interrupted, so citation
    counts fetched so far are never lost.
    """
    papers = XMLToPaperList(oldFileName)
    try:
        papers = getCitationsForPaperList(papers, notUpdateDelta, searchNotFound)
    finally:
        saveToFile(paperListToXML(papers), newFileName)


def propagatePaperList():
    """Copy conference paper-list files into the data directory, skipping
    destination files that are already newer than their source."""
    import os
    import os.path
    for path in conferenceFiles(paperListPath):
        # Skip when the data copy exists and is at least as recent.
        if os.path.exists(dataPath + path) and \
           os.path.getmtime(dataPath + path) > \
           os.path.getmtime(paperListPath + path):
            continue
        print "copying file %s" % path
        copyFile(paperListPath+path, dataPath + path)

def updateCitations():
    """Refresh citation counts, in place, for every conference file found
    under the data directory."""
    for path in conferenceFiles(dataPath):
        path = dataPath + path
        print ""
        print "Updating %s" % path
        # Same file is both input and output; module-level policy constants
        # control the refresh interval and "notFound" retries.
        getCitationForFile(path, path, updateDelta, searchNotFoundPapers)



def statistics():
    """Aggregate per-event and per-conference citation statistics and write
    a statistics.xml into each conference's data directory.

    An event's impact is citations per found paper; per-year figures are
    normalized by the event's age in days, and only events aged between
    daysStartToCountImpact and daysEndToCountImpact count toward the
    conference's per-year ranking.
    """
    for confDir in conferences(dataPath):
        # FIX: the original did "conf = Conferences" (no call), storing all
        # statistics as attributes on the class object itself, i.e. shared
        # mutable state across loop iterations.  Create a fresh instance per
        # conference instead.  (Assumes Conferences() takes no required
        # constructor arguments — its attributes are assigned below.)
        conf = Conferences()
        conf.citations = 0
        conf.foundedPapers = 0
        conf.events = []
        conf.name = confDir
        conf.citationsPerYear = 0
        conf.countedPapers = 0
        for eventFile in events(dataPath, confDir):
            path = dataPath + confDir + "/" + eventFile
            papers = XMLToPaperList(path)
            event = Event()
            event.papers = len(papers)
            event.foundedPapers = 0
            event.citations = 0
            event.date = papers.date
            if papers.date == None:
                event.daysPassed = None
            else:
                event.daysPassed = date.today() - papers.date
            # Sum citations over the papers actually found on Scholar.
            for paper in papers:
                if hasattr(paper, "citations") and \
                  paper.citations != "notFound":
                    event.citations += paper.citations
                    event.foundedPapers += 1
            if event.foundedPapers == 0:
                event.impact = 0
            else:
                event.impact = event.citations * 1.0 / event.foundedPapers
            event.usedInRanking = False
            if event.daysPassed != None:
                # Normalize to citations per year of event age.
                event.impactPerYear = event.impact * 365.0 / event.daysPassed.days
                if event.daysPassed.days >= daysStartToCountImpact and event.daysPassed.days <= daysEndToCountImpact:
                    event.usedInRanking = True
                    conf.citationsPerYear += event.citations * 365.0 / event.daysPassed.days
                    conf.countedPapers += event.foundedPapers
            else:
                event.impactPerYear = None
            # Strip the ".xml" extension to get the event name.
            event.name = eventFile[:-4]
            conf.events.append(event)
            conf.citations += event.citations
            conf.foundedPapers += event.foundedPapers
        if conf.foundedPapers == 0:
            conf.impact = 0
        else:
            conf.impact = conf.citations * 1.0 / conf.foundedPapers
        if conf.countedPapers == 0:
            conf.impactPerYear = 0
        else:
            conf.impactPerYear = conf.citationsPerYear / conf.countedPapers
        saveToFile(confToXML(conf), dataPath + confDir + "/statistics.xml")
        

def conferenceToPath(conf):
    """Return the paper-list XML path for *conf*: the directory name is
    *conf* without its last four characters (presumably the year suffix —
    e.g. "icse2008" -> ".../icse/icse2008.xml")."""
    confDir = conf[:-4]
    return "%s%s/%s.xml" % (paperListPath, confDir, conf)

def getPaperPageText(name, author):
    """Return the Scholar search-result page for the paper rendered as plain
    text — useful for debugging the title-matching regexes."""
    import html2text
    return html2text.html2text(getPaperSearchPage(name, author))

if __name__ == "__main__":
    # Command-line entry point; see the usage text printed below.
    # NOTE(review): relies on sys/os arriving via the star imports at the
    # top of the file — confirm util/data export them.
    if sys.argv[1:]:
        if sys.argv[1] == "-fetch":
            # -fetch dblpUrl conferenceName [maxPages [minPages]]
            if sys.argv[3:] and len(sys.argv[3]) > 4:
                path = conferenceToPath(sys.argv[3])
                if os.path.exists(path):
                    print "target conference already exists"
                    exit()
                min = None
                if sys.argv[5:]: min = int(sys.argv[5])
                max = None
                if sys.argv[4:]: max = int(sys.argv[4])
                
                urlToXML(sys.argv[2], path, min, max)
                exit()
        if sys.argv[1] == "-update":
            # Copy fresh paper lists in, fetch citations, and always
            # (re)compute statistics even if the update is interrupted.
            propagatePaperList()
            try:
                updateCitations()
            finally:
                statistics()
            exit()
        if sys.argv[1] == "-count":
            if sys.argv[2:]:
                path = conferenceToPath(sys.argv[2])
                print "The number of papers: %i" % len(XMLToPaperList(path))
                exit()
                

    print """
Usage:
    -fetch dblpUrl conferenceName [maxNumberofPage [minNumberofPage]]|
    -count conferenceName |
    -update 
"""
                
                
            
