#!/usr/bin/env python

# http://devlicio.us/blogs/billy_mccafferty/archive/2006/11/07/netflix-memoirs-using-the-pearson-correlation-coefficient.aspx


import logging
import logging.config

# Set up the logger - if we don't read the logging config before loading
# the other modules, their loggers are created unconfigured and the log
# output never appears.
logging.config.fileConfig('converge.config')
logger = logging.getLogger('converge')

import ConfigParser
import sys
import optparse
import pprint
import HTMLParser
import math
import time
import allconsuming
import allconsuming.webdac
import allconsuming.filedac
import utils

# Tunable thresholds for the recommender, read from the [ratings] section
# of converge.config.
c = ConfigParser.ConfigParser()
c.read('converge.config')
# NOTE(review): MINIMUM_CORRELATION is loaded but not referenced anywhere
# in this file - confirm whether it is still needed.
MINIMUM_CORRELATION = c.getfloat('ratings', 'minimum_correlation')
# An item needs at least this many neighbour ratings before it is scored.
MINIMUM_NUMBER_OF_RATINGS = c.getint('ratings', 'minimum_number_of_ratings')
# Recommendations with |score| below this are not printed.
MINIMUM_SCORE = c.getint('ratings', 'minimum_score')
# Users sharing fewer items than this with the principal are ignored.
MINIMUM_COMMON_RATINGS = c.getint('ratings', 'minimum_common_ratings')
# Below this many common ratings a correlation is scaled down
# proportionally (significance weighting) in correlateUserItems.
SIMILARITY_WEIGHTING_THRESHOLD = 15 # 2 * MINIMUM_COMMON_RATINGS
# Maximum number of similar users kept when scoring items.
NEIGHBOURHOOD_SIZE = 50

def readExtraRatings(userName, fileName):
    """Read supplementary ratings for userName from a '|'-delimited file.

    Each line is expected to look like: title|itemNumber|rating|tags
    where tags is a comma-separated list.  A rating field of 'False'
    maps to a score of 2; anything else maps to 4.

    Returns a list of allconsuming.data.Consumable objects, or [] when
    the file cannot be read or parsed (best-effort: the error is logged
    and swallowed so a missing extras file does not abort a run).
    """
    extraRatings = []
    try:
        # 'with' guarantees the handle is closed even on a parse error
        # (the original used file() and never closed it).
        with open(fileName, 'r') as f:
            for line in f:
                (title, itemNumber, rating, tags) = map(str.strip, line.split('|'))
                # NOTE(review): relies on allconsuming.data being importable
                # as a side effect of the allconsuming.* imports - confirm.
                extra = allconsuming.data.Consumable(userName)
                extra.itemNumber = itemNumber
                extra.consumed = True
                # 'False' marks a disliked item; everything else counts as liked.
                if 'False' == rating:
                    extra.rating = 2
                else:
                    extra.rating = 4

                extra.itemTitle = title
                extra.tags = map(str.strip, tags.split(','))

                extraRatings.append(extra)
    except Exception:
        # Narrowed from a bare except: so KeyboardInterrupt/SystemExit still
        # propagate; any other failure keeps the best-effort contract.
        logger.error('unable to read extra ratings from ' + fileName + ' - ignoring', exc_info=True)
        return []

    logger.debug('extra ratings =' + pprint.pformat(extraRatings))
    return extraRatings


# Equivalence map of synonymous item numbers, used to fold duplicate
# AllConsuming entries together throughout the run.
itemMap = utils.getEquivalenceMap('equivalenceMap.sq3')
logger.info(pprint.pformat(itemMap))

def ratingFromConsumable(c):
    """Extract the numeric rating carried by consumable c."""
    rating = c.rating
    return rating

def averageRating(ratings):
    """Return the arithmetic mean of the .rating attributes in ratings.

    Raises ZeroDivisionError for an empty sequence, as before.
    """
    # Seed with 0.0 so the division stays true division under Python 2.
    return sum((consumable.rating for consumable in ratings), 0.0) / len(ratings)


def correlateUserItems(user1Ratings, user2Ratings):
    """Compute the Pearson correlation between two users' ratings.

    Only items rated by both users contribute.  Returns 0 when fewer than
    MINIMUM_COMMON_RATINGS items are shared or when either user's common
    ratings do not vary at all.  When the overlap is smaller than
    SIMILARITY_WEIGHTING_THRESHOLD the result is scaled down
    proportionally (significance weighting).
    """
    numRatings = 0
    user1Average = averageRating(user1Ratings)
    user2Average = averageRating(user2Ratings)

    # Index user2's ratings by item number once, keeping the FIRST rating
    # seen for each item - this preserves the original first-match
    # semantics while replacing the O(n*m) nested scan with O(n+m).
    user2ByItem = {}
    for rating2 in user2Ratings:
        if rating2.itemNumber not in user2ByItem:
            user2ByItem[rating2.itemNumber] = rating2

    top = 0.0    # covariance accumulator (numerator)
    left = 0.0   # user1 variance accumulator
    right = 0.0  # user2 variance accumulator

    for rating1 in user1Ratings:
        item1Number = rating1.itemNumber
        rating2 = user2ByItem.get(item1Number)
        if rating2 is None:
            continue
        numRatings += 1
        # Mean-centre both ratings so harsh/generous raters compare fairly.
        consumable1Rating = ratingFromConsumable(rating1) - user1Average
        consumable2Rating = ratingFromConsumable(rating2) - user2Average
        top += consumable1Rating * consumable2Rating
        left += consumable1Rating**2
        right += consumable2Rating**2
        logger.debug('item number ' + item1Number + ':  ' + rating1.itemTitle + ' ratings: ' + str(consumable1Rating) + ' ' + str(consumable2Rating))

    if numRatings < MINIMUM_COMMON_RATINGS:
        logger.debug('only ' + str(numRatings) + ' common ratings - no correlation')
        return 0

    logger.debug('correlateUserItems: top, left, right: ' + str((top, left, right)))

    if left * right == 0:
        # At least one user gave an identical score to every common item,
        # so the correlation is undefined; treat as uncorrelated.
        logger.debug('at least one user does not vary enough - no correlation')
        return 0

    result = top/(math.sqrt(left*right))

    if numRatings < SIMILARITY_WEIGHTING_THRESHOLD:
        logger.debug('there are only ' + str(numRatings) + ' common ratings - scaling')
        result = result * numRatings / SIMILARITY_WEIGHTING_THRESHOLD

    return result


def parseArgs(args):
    """Parse the command-line arguments (a list of strings, sans argv[0]).

    Returns the (options, leftoverArgs) pair produced by optparse.
    """
    parser = optparse.OptionParser()
    # Flags and their optparse keywords, registered in one pass below.
    optionSpecs = [
        (('--user',),
         dict(action='store', type='string', dest='user',
              default='Hippopottoman')),
        (('--no-read',),
         dict(action='store_false', dest='read', default=True,
              help='read users and items from web instead of cache')),
        (('-v', '--verbose'),
         dict(action='store_true', dest='verbose', default=False,
              help="provide feedback about what's going on")),
        (('--save',),
         dict(action='store_true', dest='save',
              help='save user and items to files for future processing')),
        (('--tag',),
         dict(action='store', dest='tag', type='string', default=None,
              help='only use items with specified tag to generate recommendations')),
    ]
    for flags, keywords in optionSpecs:
        parser.add_option(*flags, **keywords)

    return parser.parse_args(args)

def verbose(message):
    """Log message, and also echo it to stdout when --verbose was given.

    Reads the module-level options set by main(); must not be called
    before main() has parsed the command line.
    """
    global options
    if options.verbose:
        print message
    logger.info(message)

def loadUserRatings(userName):
    """Fetch userName's consumables, preferring the local cache.

    Falls back to the web (with a polite 2-second delay) when the cache
    misses or cache reading is disabled, and writes a fresh web result
    back to the cache when --save was given.  May return None if both
    sources fail.
    """
    global options
    ratings = None
    if options.read:
        logger.debug('loading user ' + userName + ' from cache')
        ratings = allconsuming.filedac.getUserConsumables(userName)

    fetchedFromWeb = ratings is None
    if fetchedFromWeb:
        logger.debug('loading user ' + userName + ' from web')
        ratings = allconsuming.webdac.getUserConsumables(userName)
        time.sleep(2)

    if fetchedFromWeb and ratings is not None and options.save:
        logger.debug('saving user ' + userName + ' to cache')
        allconsuming.filedac.saveUserConsumables(userName, ratings)

    return ratings

def recommendItem(principalAverage, userAverages, principalUserCorrelations, itemRatings):
    """Predict the principal user's rating for a single item.

    Computes a correlation-weighted average of the neighbours'
    mean-centred ratings of the item, then re-centres the result on the
    principal's own average rating.

    principalAverage          -- the principal user's mean rating
    userAverages              -- {userName: mean rating} per neighbour
    principalUserCorrelations -- {userName: correlation with principal}
    itemRatings               -- this item's ratings by neighbour users

    Note: callers must supply at least one rating whose user has a
    nonzero correlation, otherwise this raises ZeroDivisionError.
    """
    sumRecommendations = 0.0
    sumCorrelations = 0.0
    for itemRating in itemRatings:
        correlation = principalUserCorrelations[itemRating.userName]
        sumCorrelations += math.fabs(correlation)
        # Mean-centre the neighbour's rating so harsh and generous raters
        # contribute comparably.
        effectiveRating = itemRating.rating - userAverages[itemRating.userName]
        sumRecommendations += correlation * effectiveRating
        # Log message typo fixed ('recommandItem' -> 'recommendItem').
        logger.debug('recommendItem: item = %s, correlation = %g, rating = %g' % (itemRating.itemNumber, correlation, effectiveRating))

    logger.debug('recommendItem recommendation = ' + str(principalAverage + sumRecommendations / sumCorrelations))
    return principalAverage + sumRecommendations / sumCorrelations

def rateItem(ratings):
    """Combine a list of signed numeric ratings into a single score.

    Returns sum(ratings) / sum(|rating|), which lies in [-1, 1] when at
    least one rating is nonzero.  The accumulator starts at 0.0 so the
    division is true division under Python 2 as well - the original used
    int accumulators and silently truncated (e.g. [4, -2] gave 0 instead
    of 0.333), unlike averageRating which already seeded with 0.0.

    Raises ZeroDivisionError if ratings is empty or all zero, as before.
    """
    total = 0.0
    count = 0
    for r in ratings:
        total += r
        count += abs(r)

    return total/count

def loadItemConsumables(itemNumber):
    """Fetch the consumables (user ratings) recorded against one item.

    Same cache-then-web strategy as loadUserRatings, keyed by item
    number: try the cache when --no-read wasn't given, otherwise hit the
    web (with a 2-second delay) and optionally save the result back.
    """
    global options
    users = None
    if options.read:
        logger.debug('loading item ' + itemNumber + ' from cache')
        users = allconsuming.filedac.getItemConsumables(itemNumber)

    cacheMissed = users is None
    if cacheMissed:
        logger.debug('loading item ' + itemNumber + ' from web')
        users = allconsuming.webdac.getItemConsumables(itemNumber)
        time.sleep(2)

    if cacheMissed and users is not None and options.save:
        logger.debug('saving users for item ' + itemNumber + ' to cache')
        allconsuming.filedac.saveItemConsumables(itemNumber, users)
    return users

def removeUnrated(ratings):
    """Return only the consumables whose rating is nonzero (i.e. rated)."""
    rated = []
    for consumable in ratings:
        if consumable.rating != 0:
            rated.append(consumable)
    return rated

def main(args=None):
    """Drive a full recommendation run for one user.

    Pipeline: load the principal user's ratings (plus local extras and
    synonym expansion), fetch every user who consumed the same items,
    correlate each with the principal, keep the NEIGHBOURHOOD_SIZE
    strongest neighbours, then score and print every item the neighbours
    rated that the principal has not consumed.  Returns 0 (exit status).
    """

    global options
    if args == None:
        args = sys.argv[1:]

    (options, args) = parseArgs(args)

    verbose('loading items for user %s' % (options.user,))

    # The principal's full consumption history, including hand-maintained
    # extra ratings and equivalence-map synonyms.
    principalHasConsumed = loadUserRatings(options.user)
    principalHasConsumed.extend(readExtraRatings(options.user, 'extraRatings.txt'))
    principalHasConsumed = utils.expandSynonyms(principalHasConsumed, itemMap)

    # limit recommendation fodder to those with the specified tag
    if options.tag:
        principalRatings = filter(lambda x: options.tag in x.tags, principalHasConsumed)
    else:
        principalRatings = principalHasConsumed[:]

    principalConsumablesToLoad = principalRatings[:]
    principalRatings = removeUnrated(principalRatings)
    principalRatings = utils.normalizeSynonyms(principalRatings, itemMap)

    logger.debug(options.user + ' had these opinions: ' + pprint.pformat(principalRatings))

    stuff = {}      # itemNumber -> other users' consumables for that item
    allUsers = {}   # userName -> consumables shared with the principal


    # Pass 1: for every item the principal touched, collect the other
    # users who also consumed it.
    itemCount = 1
    verbose('About to fetch %d items' % (len(principalConsumablesToLoad)))
    for rating in principalConsumablesToLoad:
        itemNumber = rating.itemNumber
        verbose('Fetching item ' + str(itemCount) + ' - ' + itemNumber + ': ' + rating.itemTitle)
        itemCount += 1
        itemConsumables = loadItemConsumables(itemNumber)
        # Drop the principal's own entry (first match only; removing while
        # iterating is safe here because we break immediately).
        for c in itemConsumables:
            if c.userName == options.user:
                itemConsumables.remove(c)
                break
        
        if len(itemConsumables) > 0:
            stuff[itemNumber] = itemConsumables
            logger.debug('item ' + itemNumber + ' was consumed by ' + pprint.pformat([x.userName for x in itemConsumables]))
            itemConsumables = utils.normalizeSynonyms(itemConsumables, itemMap)
            for itemConsumable in itemConsumables:
                record = allUsers.get(itemConsumable.userName, [])
                # NOTE(review): this looks intended to avoid duplicate items
                # per user, but it compares a string itemNumber against a
                # list of consumable objects, so it is always true - confirm.
                if itemNumber not in record:
                    record.append(itemConsumable)
                    allUsers[itemConsumable.userName] = record


    # cull user from the list - shouldn't be needed
    if options.user in allUsers:
        del allUsers[options.user]
    
    logger.debug('raw list of users: ' + pprint.pformat(allUsers.keys()))

    # Cull users with too little overlap to correlate meaningfully.
    # (Deleting while iterating items() is fine on Python 2, where items()
    # returns a list snapshot; it would raise on Python 3.)
    for (user, ratings) in allUsers.items():
       #print user, len(ratings)
       if len(ratings) < MINIMUM_COMMON_RATINGS:
           logger.debug('user ' + user + ' has only ' + str(len(ratings)) + ' ratings - culling')
           del allUsers[user]

    logger.debug('culled list of users: ' + pprint.pformat(allUsers.keys()))

    correlations = {}       # userName -> correlation with the principal
    savedUserRatings = {}   # userName -> that user's normalized ratings

    userNumber = 1

    # Pass 2: correlate each remaining user with the principal, keeping
    # only meaningfully correlated ones (|r| > 0.05).
    for (user, userRatings) in allUsers.items():
        # find the strength of the relationship
        correlation = correlateUserItems(principalRatings, userRatings)

        logger.info('correlation between ' + options.user + ' and ' + user + ' = ' + str(correlation))
        
        if math.fabs(correlation) > 0.05:
            correlations[user] = correlation

    # Keep the NEIGHBOURHOOD_SIZE most strongly correlated users,
    # sorted by absolute correlation, strongest first.
    sortedUsers = []
    for (user, correlation) in correlations.items():
        sortedUsers.append((math.fabs(correlation),correlation,user))
    sortedUsers.sort(reverse=True)
    sortedUsers = sortedUsers[:NEIGHBOURHOOD_SIZE]

    verbose('About to fetch %d users' % (len(sortedUsers)))

    # Pass 3: fetch each neighbour's complete (rated-only) history.
    for (junk, rating, user) in sortedUsers:
        verbose('Fetching user %4d - %s' % (userNumber, user))
        userNumber += 1

        userRatings = removeUnrated(loadUserRatings(user))
        if len(userRatings) == 0:
            logger.debug(user + ' had no opinions')
            continue
        logger.debug(user + ' had these opinions: ' + pprint.pformat(userRatings))
        userRatings = utils.normalizeSynonyms(userRatings, itemMap)
        savedUserRatings[user] = userRatings

    # Per-neighbour mean rating, needed for mean-centring in recommendItem.
    averageUserRatings = {}
    for (user, userRatings) in savedUserRatings.items():
        averageUserRatings[user] = averageRating(userRatings)

    # Invert to itemNumber -> all neighbour ratings of that item.
    allItemRatings = {}
    for (user, ratings) in savedUserRatings.items():
        for rating in ratings:
            itemRatings = allItemRatings.get(rating.itemNumber, [])
            itemRatings.append(rating)
            allItemRatings[rating.itemNumber] = itemRatings

    # Pass 4: score every item the principal has NOT already consumed and
    # that has enough neighbour ratings to be trustworthy.
    finalRatings = []
    for itemNumber in allItemRatings:
        found = False
        for pRating in principalHasConsumed:
            if pRating.itemNumber == itemNumber:
                found = True
                break
        if found:
            continue
        ratings = allItemRatings[itemNumber]
        if len(ratings) < MINIMUM_NUMBER_OF_RATINGS:
            logger.debug(str(itemNumber) + ' has only ' + str(len(ratings)) + ' ratings - skipping')
            continue
        finalRatings.append([recommendItem(averageRating(principalRatings), averageUserRatings, correlations, ratings), ratings[0].itemTitle, itemNumber])

    finalRatings.sort()

    logger.debug('Final ratings: ' + pprint.pformat(finalRatings))
    print 'Recommendations'
    for r in finalRatings:
        if math.fabs(r[0]) >= MINIMUM_SCORE:
            print '%5.2f %s - %s' % tuple(r)

    return 0

# Script entry point: exit status is whatever main() returns.
if __name__ == '__main__':
    sys.exit(main())
    
