#!/usr/bin/env python 

'''Sample baseline averaging algorithms.''' 

import numpy as N 
from pyflix.algorithms import Algorithm
from pyflix.datasets import RatedDataset
import time
from random import sample

class MovieAverage(Algorithm):
  '''Baseline predictor: every user is predicted the mean of all votes
  cast for the movie.

  This algorithm returns an RMSE score of 1.0528 on the scrubbed dataset.
  '''

  def __init__(self, training_set):
    # movie_id -> cached mean rating, filled lazily on first lookup
    self._movie_averages = {}
    super(MovieAverage,self).__init__(training_set)

  def __call__(self, movie_id, user_id):
    '''Return the mean rating of movie_id; user_id is ignored.'''
    if movie_id not in self._movie_averages:
      all_ratings = self._training_set.movie(movie_id).ratings()
      self._movie_averages[movie_id] = N.average(all_ratings)
    return self._movie_averages[movie_id]


class UserAverage(Algorithm):
  '''Baseline predictor: every movie is predicted the mean of all votes
  cast by the user.

  This algorithm returns an RMSE score of 1.0688 on the scrubbed dataset.
  '''

  def __init__(self, training_set):
    # user_id -> cached mean rating, filled lazily on first lookup
    self._user_averages = {}
    super(UserAverage,self).__init__(training_set)

  def __call__(self, movie_id, user_id):
    '''Return the mean rating given by user_id; movie_id is ignored.'''
    if user_id not in self._user_averages:
      all_ratings = self._training_set.user(user_id).ratings()
      self._user_averages[user_id] = N.average(all_ratings)
    return self._user_averages[user_id]


class DoubleAverage(MovieAverage,UserAverage):
  '''Predicts the mean of the L{MovieAverage} and L{UserAverage}
  predictions for the (movie, user) pair.

  This algorithm returns an RMSE score of 1.0158 on the scrubbed dataset.
  '''

  def __call__(self, movie_id, user_id):
    movie_avg = MovieAverage.__call__(self, movie_id, user_id)
    user_avg  = UserAverage.__call__(self, movie_id, user_id)
    return (movie_avg + user_avg) / 2

def argmax(k, data):
  '''Return the indices of the k largest elements of data, largest
  first.  Ties are broken in favor of the higher index.'''
  from heapq import nlargest
  indexed = ((value, index) for index, value in enumerate(data))
  return [index for value, index in nlargest(k, indexed)]

class Neighborhood(DoubleAverage):

  def __init__(self, training_set):
    super(Neighborhood,self).__init__(training_set)
    self.dict = {}                      # cache of correlations for
    self.dict['m'] = 1                  # current movie
    # self.mu = 3.60330425781
    self.M = 17770                      # total number of movies
    self.K = 15                         # value for K nearest neighbors
    self.ALPHA = 10.
    # timing stuff
    self._i = 1
    self.start = time.time()            # starting time of algorithm
    self.itert = time.time()            # iteration time
    
  def _movie_average(self, movie_id):
    return MovieAverage.__call__(self,movie_id,0)

  def _user_average(self, user_id):
    return UserAverage.__call__(self,0,user_id)

  def _baseline(self, movie_id, user_id):
    return DoubleAverage.__call__(self,movie_id,user_id)

  def _knn(self, movie_id, movies, ratings, prefix='data/'):
    fn = file(prefix+str(movie_id),'r')
    fc = file(prefix+str(movie_id)+'c','r')
    nn = [int(i) for i in fn.readlines()[:self.K]]   # k nearest movies
    nc = [float(x) for x in fc.readlines()[:self.K]] # and their corr
    res = zip(*[(m,r,c) for m,r,c in zip(movies,ratings,nc) if m in nn])
    if res: return res
    else: raise ValueError

  def _pknn(self, min=1, max=17771, prefix='data/'):
    '''Precompute top K nearest neighbors for each movie and write
    them to file.'''
    for self._i in xrange(min,max):
      # largest K correlations
      corr = N.zeros((self.M+1))
      for j in xrange(1,self.M+1):
        if self._i == j: corr[j] = 0
        else: corr[j] = self._c(self._i,j)
      sort = argmax(self.K,corr)
      # save
      f = file(prefix+str(self._i),'w')
      f.write('\n'.join([str(x) for x in sort]))
      f.close()
      self._pm(min, max)                # print timing information

  def _pc(self, min=1, max=17771, prefix='data/'):
    '''Precompute the correlations of top K nearest neighbors for each
    movie and write them to file.'''
    for self._i in xrange(min,max):
      fknn = file(prefix+str(self._i),'r')
      fcor = file(prefix+str(self._i)+'c', 'w')
      # k nearest neighbors
      res = [self._c(self._i,int(j)) for j in fknn.readlines() if int(j) != 0]
      fcor.write('\n'.join([str(x) for x in res]))
      fknn.close()
      fcor.close()
      # print timing information
      if self._i % 10 == 0: self._pm(min,max)

  def _pm(self, min=1, max=17771):
    '''Print an update on how much time remaining.'''
    curt =  time.time()
    it   =  curt-self.itert
    avg  = (curt-self.start)/(self._i-min+1)
    tot  = (curt-self.start)
    rem  = (max-self._i) * avg / 60
    print 'movie %5d time: %6.2f s avg: %6.2f s rem: %5.2f m (%4.2f h) total: %5.2f h' \
          % (self._i, it, avg, rem, rem/60, tot/60**2)
    self.itert = curt

  def _c(self, i, j):
    '''Compute the correlation between movies i,j.'''
    R = self._training_set.ratingsMatrixTo(i,j)
    I = R[:,0] - self._movie_average(i)
    J = R[:,0] - self._movie_average(j)
    return (N.dot(I,J) / N.sqrt( N.dot(I,I) * N.dot(J,J) ))

  def _dc(self, i, j):
    '''Get the correlation between movies i,j by first trying cache of
    current movie i.'''
    try: return self.dict[j]
    except:
      r = self._c(i,j)
      self.dict[j] = r
      return r

  def __call__(self, movie_id, user_id):
    '''Compute the predicted rating of movie: movie_id by user:
    user_id.'''
    # clear cache if we move to new movie
    if self.dict['m'] != movie_id:
      self.dict.clear()
      self.dict['m'] = movie_id
      # print timing information
      # self._pm()
      self._i = movie_id

    rating          = self._baseline(movie_id, user_id)
    user            = self._training_set.user(user_id)
    movies, ratings = zip(*list(user.iterValueRatings()))
    # use only KNN
    # movies, ratings = self._knn(movie_id, movies, ratings)
    try:
      movies, ratings, correlation = self._knn(movie_id, movies, ratings)
    except ValueError:
      return rating
    ratings         = N.array(ratings)
    baselines       = N.array([self._baseline(movie_j, user_id)       \
                               for movie_j in movies])
    # correlation     = N.array([self._dc(movie_id, movie_j)  \
    #                            for movie_j in movies])
    correlation     = N.array(correlation)
    rating         += N.dot((ratings - baselines), correlation) \
                      / (N.sqrt( len(movies) ) + self.ALPHA)
                      # / N.sum( movies )
    return max(min(rating,5.),1.)


# n = Neighborhood(RatedDataset('/netflix/database/training_set'))
