#!/usr/bin/env python

from optparse import OptionParser
import operator
from collections import defaultdict
import itertools
import logging
import math
import sys
import os
from os import path
import time

from util import *
from predictors import *

def read_data(filename):
    """Read ratings data from file.

    Expected file format is a tab separated list of
    user id | item id | rating | timestamp.

    Malformed lines (fewer than three tab-separated fields) are logged
    and skipped.

    Returns a list of tuples: (userid, itemid, rating)
    """
    r = []
    with open(filename) as f:
        for line in f:
            try:
                (user, item, rating) = line.split('\t')[0:3]
            except ValueError:
                logging.error("Error reading line: '%s'" % line, exc_info=True)
                # Skip the bad line; without this the append below would
                # raise NameError (first line) or reuse the previous
                # line's values (later lines).
                continue
            r.append((int(user), int(item), float(rating)))

    return r


def read_item_info(filename):
    """
    Read info about movies from filename.  Expected format:
    one line per movie. Format:

    movie id | movie title | release date | video release date |
    IMDb URL | unknown | Action | Adventure | Animation |
    Children's | Comedy | Crime | Documentary | Drama | Fantasy |
    Film-Noir | Horror | Musical | Mystery | Romance | Sci-Fi |
    Thriller | War | Western |

    The last 19 fields are the genres, a 1 indicates the movie
    is of that genre, a 0 indicates it is not; movies can be in
    several genres at once.
    The movie ids are the ones used in the u.data data set.

    Malformed lines (fewer than two '|'-separated fields) are logged
    and skipped.

    Returns dict of movie_id -> title
    """
    d = {}
    with open(filename) as f:
        for line in f:
            try:
                # 'movie_id' rather than 'id' to avoid shadowing the builtin.
                (movie_id, title) = line.split('|')[0:2]
            except ValueError:
                logging.error("Error reading line: '%s'" % line, exc_info=True)
                # Skip the bad line; without this the assignment below would
                # raise NameError or reuse the previous line's values.
                continue
            d[int(movie_id)] = title

    return d


def configure_logging(loglevel):
    """Configure the root logger to write bare messages to stdout.

    *loglevel* is a level name such as 'debug' or 'info' (any case).
    Raises ValueError if the name is not a valid logging level.
    """
    level = getattr(logging, loglevel.upper(), None)
    if not isinstance(level, int):
        raise ValueError('Invalid log level: %s' % loglevel)

    handler = logging.StreamHandler(sys.__stdout__)
    handler.setFormatter(logging.Formatter('%(message)s'))

    root = logging.getLogger('')
    root.setLevel(level)
    root.addHandler(handler)

def debugging():
    """Return True when the root logger will emit DEBUG-level records."""
    return logging.getLogger('').isEnabledFor(logging.DEBUG)

def add_parser_options(parser):
    """Register this script's command-line options on *parser*.

    Defaults: loglevel 'info', item file 'u.item', data dir './ml-100k',
    neighborhood size 10.
    """
    specs = [
        ("--loglevel",
         {"dest": "loglevel", "default": "info",
          "help": "Set the logging level: 'debug' or 'info'"}),
        ("--item_info",
         {"dest": "item_info", "default": "u.item",
          "help": "Name of the item info file (default: %default)"}),
        ("--data_dir",
         {"dest": "data_dir", "default": "./ml-100k",
          "help": "Path to the data files (default: %default)"}),
        ("--N",
         {"dest": "N", "default": 10, "type": int,
          "help": "Neighborhood size (default: %default)"}),
    ]
    for flag, kwargs in specs:
        parser.add_option(flag, **kwargs)


def run_test(predictor, train_data, test_data, options):
    """
    Build a predictor from train_data, predict each (user, item) pair in
    test_data, and log the RMSE and elapsed wall-clock time.

    Data format (user_id, item_id, rating).

    Returns the RMSE as a float.
    """
    start = time.time()
    # Keep the model in its own name; previously the detail-logging loop
    # reused 'p' and shadowed the predictor instance.
    model = predictor(train_data, options)

    predictions = [model.predict(uid, mid) for (uid, mid, _) in test_data]
    actual = [t[2] for t in test_data]
    if debugging():
        logging.debug("================ Detailed results for %s================" % (predictor.__name__))
        logging.debug("%6s%6s%6s%6s" % ("uid", "mid", "pred", "act"))
        for (t, p, a) in zip(test_data, predictions, actual):
            logging.debug("%6d%6d%6.2f%6.1f" % (t[0], t[1], p, a))

    # Root mean squared prediction error over the test set.
    rmse = math.sqrt(mean((p - a) * (p - a) for (p, a) in zip(predictions, actual)))
    elapsed = time.time() - start
    logging.info("")
    logging.info("Results for %s" % (predictor.__name__))
    logging.info("RMSE: %.2f" % rmse)
    logging.info("Time needed: %.2f seconds" % elapsed)
    print('')
    return rmse

def main():
    """Parse arguments, load the data files, and evaluate each predictor."""
    usage_msg = "Usage:  %prog [options] train.data test.data"
    parser = OptionParser(usage=usage_msg)

    def usage(msg):
        # Report the problem, show help, and exit.  Must be a print()
        # call (function form) so the file parses under Python 3.
        print("Error: %s\n" % msg)
        parser.print_help()
        sys.exit()

    add_parser_options(parser)
    (options, args) = parser.parse_args()
    if len(args) != 2:
        usage("Expecting two filenames as arguments.  Given %d." % len(args))
    (train_f, test_f) = args

    configure_logging(options.loglevel)

    def getpath(f):
        # All data files are resolved relative to --data_dir.
        return os.path.join(options.data_dir, f)

    # read data
    train_data = read_data(getpath(train_f))
    test_data = read_data(getpath(test_f))
    item_info = read_item_info(getpath(options.item_info))

    print('')
    print('**********************')
    print('Start')
    print('**********************')
    print('')
    print('')
    logging.debug("%d training points, %d testing points" % (len(train_data), len(test_data)))
    logging.debug("%d movies in info file" % len(item_info))
    print('')

    # Predictors to evaluate.  Other available implementations:
    # AverageBased, UserAverageBased, UserBased, ItemBased.
    predictors = [MagicBased]

    for p in predictors:
        run_test(p, train_data, test_data, options)
    
# Entry point: run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
