#!/usr/bin/env python

import sys
import HTMLParser
import logging
import urllib2
import pprint
import data

# Set up the logger
logger = logging.getLogger('allConverging.allconsuming.webdac')

# URL templates for allconsuming.net: "person" pages list everything a
# user has consumed; "item" pages list everyone who consumed an item.
# The *AdditionalPagesPattern forms get a page number appended for
# results beyond page 1.
baseUrl = 'http://www.allconsuming.net' 
personPattern = baseUrl + '/person/%s/consumed/book'
personAdditionalPagesPattern = personPattern + '?page='

itemUrl = baseUrl + '/item/consumers/%s?view=consumed'
itemAdditionalPagesPattern =  itemUrl + '&page='

# Numeric ratings mapped from the site's "worth it" / "not worth it" flags.
WORTHIT_SCORE = 4
NOTWORTHIT_SCORE = 2

def getMaxPageNumber(data):
    """Return the highest page number advertised in a page of HTML.

    Scans backwards for the last '&amp;page=' marker (item pages) or,
    failing that, '?page=' (person pages), and parses the digits that
    immediately follow it.  Returns 1 when no pagination marker exists.

    Parameters:
        data: the raw HTML of a result page, as a string.
    """
    searchString = '&amp;page='

    # Find the last pagination link: item pages embed the page number
    # after an HTML-escaped ampersand, person pages after a '?'.
    match = data.rfind(searchString)
    if match < 0:
        searchString = '?page='
        match = data.rfind(searchString)
    if match < 0:
        # No pagination markers at all: everything fits on one page.
        return 1

    # Collect the digits immediately following the marker.  The index is
    # bounds-checked: the original raised IndexError when the number was
    # the very last text in the document.
    pageNumber = ''
    index = match + len(searchString)
    while index < len(data) and data[index] in '0123456789':
        pageNumber += data[index]
        index += 1
    # A marker with no digits after it would have made int('') raise
    # ValueError; treat it as a single page instead.
    return int(pageNumber) if pageNumber else 1

def getItemConsumables(itemNumber):
    """Fetch every consumer record for an item, across all result pages.

    Parameters:
        itemNumber: the allconsuming.net item id (int or str; it is only
            ever interpolated with %s).

    Returns:
        The list of data.Consumable objects accumulated by the
        ItemPageParser over page 1 and any additional pages.
    """
    logger.info('fetching consumables for item %s', itemNumber)

    url = itemUrl % (itemNumber,)
    data = urllib2.urlopen(url).read()

    parser = ItemPageParser(itemNumber)
    parser.feed(data)

    maxPageNumber = getMaxPageNumber(data)
    # Lazy %-style logger args, matching the info() call above.  The
    # original concatenated itemNumber into the message string, which
    # raised TypeError whenever callers passed an int.
    logger.debug('item %s has %s pages of consumables',
                 itemNumber, maxPageNumber)

    # Page 1 was parsed above; fetch and feed the remaining pages into
    # the same parser so its links list accumulates across pages.
    for pageNumber in range(2, maxPageNumber + 1):
        logger.debug('fetching page %s', pageNumber)
        url = (itemAdditionalPagesPattern % itemNumber) + str(pageNumber)
        data = urllib2.urlopen(url).read()
        parser.feed(data)
    logger.debug('there were %s consumers', len(parser.links))
    return parser.links

class ItemPageParser(HTMLParser.HTMLParser):
    """Parses an item's consumers page into data.Consumable records.

    The page title is captured first (it becomes each record's item
    title).  Inside the <div class="people"> list, each /person/ link
    opens a pending record, and the following worthit/notworthit span
    finalizes it into self.links.
    """

    def __init__(self, itemNumber):
        self.title = None           # page title text, once captured
        self.readingTitle = False   # currently inside <title>
        self.inPeopleList = False   # currently inside the consumers div
        self.itemNumber = str(itemNumber)
        self.links = []             # finished Consumable records
        self.nextEntry = None       # record awaiting its rating span
        HTMLParser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        first = attrs[0] if attrs else None

        # Capture the first <title> only.
        if tag == 'title' and self.title is None:
            self.readingTitle = True
            self.title = ''
            return

        if tag == 'div' and first == ('class', 'people'):
            self.inPeopleList = True
            return

        if not self.inPeopleList:
            return

        if (tag == 'a' and len(attrs) == 2
                and first[0] == 'href'
                and first[1].startswith('/person/')):
            # Consumer link of the form /person/<name>/...; start a
            # fresh record for that person.
            person = first[1].split('/')[2]
            entry = data.Consumable(person)
            entry.consumed = True
            entry.itemNumber = self.itemNumber
            entry.itemTitle = self.title
            self.nextEntry = entry
        elif (tag == 'span'
                and self.nextEntry is not None
                and first is not None
                and first[0] == 'class'
                and first[1] in ('worthit', 'notworthit')):
            # The rating span completes the pending record.
            if first[1] == 'worthit':
                self.nextEntry.rating = WORTHIT_SCORE
            else:
                self.nextEntry.rating = NOTWORTHIT_SCORE
            self.links.append(self.nextEntry)
            self.nextEntry = None

    def handle_data(self, data):
        # Accumulate title text (it may arrive in multiple chunks).
        if self.readingTitle:
            self.title += data

    def handle_endtag(self, tag):
        if tag == 'title':
            self.readingTitle = False
        # NOTE(review): the people list is entered on a <div> but exited
        # on </ol> -- presumably the consumers are an <ol> inside that
        # div; confirm against the live markup.
        if tag == 'ol':
            self.inPeopleList = False

def getUserConsumables(userName):
    """Fetch every consumed item for a user, across all result pages.

    Parameters:
        userName: the allconsuming.net user name.

    Returns:
        A list of data.Consumable objects parsed from the user's
        consumed-books pages.
    """
    logger.info('fetching items for user %s', userName)

    firstPage = urllib2.urlopen(personPattern % userName).read()
    parser = ConsumablePageParser(userName)
    parser.feed(firstPage)
    result = list(parser.links)

    maxPageNumber = getMaxPageNumber(firstPage)
    logger.debug('user ' + userName + ' has ' + str(maxPageNumber) + ' pages of consumables')

    # Page 1 is already in result; parse each additional page with a
    # fresh parser so per-page state cannot leak between feeds.
    for pageNumber in range(2, maxPageNumber + 1):
        pageParser = ConsumablePageParser(userName)
        url = (personAdditionalPagesPattern % userName) + str(pageNumber)
        pageParser.feed(urllib2.urlopen(url).read())
        result.extend(pageParser.links)
    return result

class ConsumablePageParser(HTMLParser.HTMLParser):
    """Parses a user's consumed-books page into data.Consumable records.

    Each <li> describes one item.  Within it we look for the consumed
    check image (starts a record), the /item/view link (item number and
    title), /tag/ links, the rating span, and the progress_link entry
    count.  A pending record is committed to self.links on </li>.
    """

    def __init__(self, userName):
        self.userName = userName
        self.inLi = False        # inside an <li> item block
        self.inItem = False      # inside the item's title <a>
        self.inEntries = False   # next text node is the entry count
        self.links = []          # completed Consumable records
        self.nextEntry = None    # record currently being assembled
        HTMLParser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        if tag == 'li':
            self.inLi = True
            return
        if not self.inLi:
            return

        if tag == 'a':
            # Guard against attribute-less anchors: the original indexed
            # attrs[0] unconditionally and raised IndexError on "<a>".
            if (attrs and attrs[0][0] == 'href'
                    and attrs[0][1].find('/tag/') != -1):
                # Tag link of the form .../tag/<tagname>.  Only record
                # it when an item record is open (the original raised
                # AttributeError on None here).
                if self.nextEntry is not None:
                    self.nextEntry.tags.append(attrs[0][1].split('/')[-1])
                return
            for name, value in attrs:
                if name == 'href' and value.startswith('/item/view'):
                    self.inItem = True
                    if not self.nextEntry:
                        self.nextEntry = data.Consumable(self.userName)
                    # The item id follows the '/item/view/' prefix.
                    self.nextEntry.itemNumber = value[11:]
                elif name == 'class' and value == 'progress_link':
                    self.inEntries = True
        elif tag == 'img':
            for name, value in attrs:
                if name == 'alt' and value == 'Check-big':
                    # The check image marks the item consumed and starts
                    # a fresh record.
                    self.nextEntry = data.Consumable(self.userName)
                    self.nextEntry.consumed = True
        elif tag == 'span' and self.nextEntry is not None:
            # Rating span; skipped entirely when no record is open
            # (previously an AttributeError).
            for name, value in attrs:
                if name == 'class':
                    if value == 'worthit':
                        self.nextEntry.rating = WORTHIT_SCORE
                    elif value == 'notworthit':
                        self.nextEntry.rating = NOTWORTHIT_SCORE

    def handle_data(self, data):
        if self.inEntries:
            # e.g. "3 entries" -> 3; only applied to an open record.
            if self.nextEntry is not None:
                self.nextEntry.numEntries = int(data.split()[0])
            self.inEntries = False
        elif self.inItem and self.nextEntry is not None:
            # Title text may arrive in several chunks; append each.
            self.nextEntry.itemTitle += data

    def handle_entityref(self, name):
        # Minimal entity translation for titles: '&amp;' becomes '&';
        # any other entity passes through as its bare name.
        def translateEntityRef(name, refs={'amp': '&'}):
            return refs.get(name, name)

        if self.inItem and self.nextEntry is not None:
            self.nextEntry.itemTitle += translateEntityRef(name)

    def handle_endtag(self, tag):
        if tag == 'li':
            self.inLi = False
            # Commit the finished record, if any.
            if self.nextEntry:
                self.links.append(self.nextEntry)
                self.nextEntry = None
        elif tag == 'a' and self.inItem:
            self.inItem = False

def main(args=None):
    """Command-line entry point: dump each named user's consumables.

    Parameters:
        args: list of user names; defaults to sys.argv[1:].

    Returns:
        0, used as the shell exit status.
    """
    if args is None:
        args = sys.argv[1:]
    for userName in args:
        # Bug fix: the original called getUserRecord(), which is not
        # defined anywhere in this module (NameError at runtime); the
        # fetcher defined above is getUserConsumables().
        pprint.pprint(getUserConsumables(userName))
    return 0


# Run as a script: the process exit status comes from main().
if __name__ == '__main__':
    sys.exit(main())

