#!/usr/bin/python

import math
from numpy import array, mean, std
from scipy.cluster.vq import vq, kmeans, whiten
import pylab
from netflix_io import *


#MARK: subgenres

def make_subgenres(movies, tag_vector, k):
	"""Partition movies into up to k subgenres via k-means clustering.

	movies: {movie_id: feature vector}, each vector as long as tag_vector
	tag_vector: list of (tag, ...) entries naming each feature dimension
	k: requested cluster count; kmeans may drop empty clusters and return fewer

	Returns (subgenres, dist): a Subgenre per surviving centroid (with its
	member feature vectors attached) and the mean distortion from kmeans.
	"""
	features_list = list(movies.values())

	# normalize per-feature variance so no single tag dominates the distance
	whitened = whiten(array(features_list))

	# seed with the first k whitened rows; kmeans treats the array as
	# explicit initial centroids rather than a cluster count
	seeds = array(whitened[:k])
	book, dist = kmeans(whitened, seeds)

	# assign every movie to its nearest centroid
	codes, _junk = vq(whitened, book)

	feature_clusters = [[] for _x in book]
	for movie_i, code in enumerate(codes):
		feature_clusters[code].append(features_list[movie_i])

	# wrap each centroid and its member features in a Subgenre
	subgenres = []

	for code, centroid in enumerate(book):
		subgenre = Subgenre(centroid, tag_vector)
		subgenres.append(subgenre)
		subgenre.set_cluster(feature_clusters[code])

	return subgenres, dist

class Subgenre:
	"""A k-means cluster of movies, named by its most prominent tags."""

	def __init__(self, centroid, tag_vector):
		# "is not None": centroid is typically a numpy row, and
		# `centroid != None` would compare elementwise and break the assert
		assert centroid is not None and tag_vector is not None
		assert len(centroid) == len(tag_vector)

		# data set
		self.centroid = centroid
		self.tag_vector = tag_vector

		# cluster statistics, filled in by set_cluster()
		self.means = None	# [f] as tag_vector
		self.stds = None	# [f]
		self.count = None	#  d

		# cached
		self.name = None

	def set_cluster(self, cluster): # NOTE: cluster is list of features associated with this subgenre
		"""Record per-feature mean/std statistics over the cluster's members."""
		assert not self.count
		assert len(cluster)

		self.means, self.stds = [], []
		self.count = len(cluster)

		# zip(*cluster) transposes rows-of-movies into columns-of-features
		for feature in zip(*cluster):
			self.means.append(mean(feature))
			self.stds.append(std(feature))

	def get_name(self, sig_mean = 0.3, length_max = 3):
		"""Build (and cache) a display name from the strongest visible tags.

		sig_mean: minimum feature mean for a tag to qualify
		length_max: cap on the number of tags used (None = unlimited)
		"""
		assert self.count > 0

		# "is not None": an empty cached name should not be rebuilt each call
		if self.name is not None: return self.name

		# keep significant features, strongest first
		indexed_mean = [(u, i) for i, u in enumerate(self.means) if u > sig_mean]
		indexed_mean.sort(reverse=True)

		if length_max is not None:
			indexed_mean = indexed_mean[:length_max]

		# create and cache name from tags
		names = []

		for u, i in indexed_mean:
			tag = self.tag_vector[i][0]
			if is_tag_visible(tag):
				names.append(tag)

		self.name = " & ".join(names)
		return self.name

class Predictor:
	"""Per-user rating predictor: a movie's rating is predicted as the mean
	of the user's past ratings within the movie's nearest subgenre."""

	def __init__(self, user_id, subgenres):
		assert len(subgenres)

		# data set
		self.user_id = user_id
		self.subgenres = subgenres

		# ratings statistics, filled in by set_evidence()
		self.our_codes = None	# [subgenre index] limited to non-zero entries
		self.means = None		# [f] as our_codes
		self.stds = None		# [f]
		self.count = None		#  d

	def set_evidence(self, ratings, movies): # FINISH: this can be done iteratively
		"""Cluster the user's rated movies by subgenre centroid and record
		per-cluster rating statistics.

		ratings: [(movie_id, rating)]
		movies: {movie_id: feature vector}

		Returns the mean of the per-cluster rating standard deviations.
		"""
		assert self.our_codes is None
		assert len(ratings) and movies is not None

		book = array([x.centroid for x in self.subgenres])

		# features are deliberately left unwhitened: whiten(features) caused
		# NaNs (presumably zero-variance columns — confirm), and
		# rmse_for_observations must use the same representation
		features = array([movies[x[0]] for x in ratings])

		# categorize ratings by nearest centroid
		codes, _junk = vq(features, book)

		rating_clusters = [[] for _x in book]

		for rating_i, code in enumerate(codes):
			_movie_id, rating = ratings[rating_i]
			rating_clusters[code].append(rating)

		# keep statistics only for centroids this user actually rated within
		self.our_codes = [code for code, cluster in enumerate(rating_clusters) if len(cluster)]

		self.means, self.stds = [], []
		self.count = len(ratings)

		for code in self.our_codes:
			cluster = rating_clusters[code]

			self.means.append(mean(cluster))
			self.stds.append(std(cluster))

		return mean(self.stds)

	def rmse_for_observations(self, ratings, movies):
		"""Predict each observed rating from its cluster mean and return the
		root-mean-square error against the actual ratings."""
		assert self.our_codes is not None
		assert len(ratings) and movies is not None

		# restrict the codebook to clusters we have statistics for, so the
		# codes from vq index directly into self.means
		book = array([self.subgenres[code].centroid for code in self.our_codes])

		# unwhitened on purpose, to match set_evidence (whiten gave NaNs)
		features = array([movies[x[0]] for x in ratings])

		# cluster observations
		codes, _junk = vq(features, book)

		# predict each rating by its cluster's mean; accumulate squared error
		total = 0.0
		for i, (_movie_id, actual) in enumerate(ratings):
			error = self.means[codes[i]] - actual
			total += error*error

		return math.sqrt(total/len(ratings))


#MARK: main

def find_best_k(movies, tag_vector, user_ratings, max_k):
	"""Pick the cluster count k whose subgenres give the lowest mean RMSE.

	Tries each k in range(3, max_k), builds a Predictor per user against the
	resulting subgenres, and scores the clustering by the mean self-prediction
	RMSE across all users.

	Returns (best_k, k_and_rmse) where k_and_rmse is [(k, mean rmse)] for
	every k that yielded a new cluster count.
	"""
	k_and_rmse = [] # [(k, mean rmse across users)]
	last_k = None

	for k in range(3, max_k):
		# find subgenres and nop if no change (kmeans can drop empty
		# clusters, so distinct k values may collapse to the same count)
		subgenres, dist = make_subgenres(movies, tag_vector, k)

		if len(subgenres) == last_k: continue
		last_k = len(subgenres)

		# score this clustering by each user's self-prediction error
		rmses = []

		for user_id, ratings in user_ratings.iteritems():
			predictor = Predictor(user_id, subgenres)
			predictor.set_evidence(ratings, movies)
			rmses.append(predictor.rmse_for_observations(ratings, movies))

		k_and_rmse.append((k, mean(rmses)))

	best_k, _best_rmse = min(k_and_rmse, key=lambda x: x[1])
	return best_k, k_and_rmse


def test_accuracy(movies, tag_vector, user_ratings, subgenres, train_frac):
	"""Train each user's predictor on the leading train_frac of their
	ratings, then return the mean RMSE over every user's full rating list."""
	# train one predictor per user on a prefix of that user's ratings
	user_predictors = {}

	for user_id, ratings in user_ratings.iteritems():
		predictor = Predictor(user_id, subgenres)
		user_predictors[user_id] = predictor

		train_len = int(math.floor(len(ratings) * train_frac))
		if not train_len: train_len = 1 # CHECK: should we skip instead?

		predictor.set_evidence(ratings[:train_len], movies)

	# evaluate each predictor against the user's complete rating history
	rmses = [predictor.rmse_for_observations(user_ratings[user_id], movies)
			for user_id, predictor in user_predictors.iteritems()]

	return mean(rmses)


def main():
	# parameters
	max_movies = None
	max_tags = 30
	max_users = 10000
	max_k = 5*max_tags
	
	# load movie feature data
	print "loading movies"
	movies, tag_vector, miss_count = read_movie_features(max_movies, max_tags)
	
	print "\tselected %d tags" % len(tag_vector)
	print "\tlimited to %d/%d movies" % (len(movies), len(movies) + miss_count)
	
	# load users and ratings
	print "loading users"
	users = read_users(movies, max_users)
	
	print "\tselected %d users" % len(users)
	
	# load ratings
	print "loading user ratings"
	user_ratings = read_user_ratings(users, movies)
	
	# optimize number of clusters
	print "finding best k"
	best_k, k_and_std = find_best_k(movies, tag_vector, user_ratings, max_k)
	
	pylab.plot([d[0] for d in k_and_std], [d[1] for d in k_and_std])
	pylab.title("Cluster size optimization")
	pylab.xlabel("k (# clusters)")
	pylab.ylabel("mean cluster stdev")
	pylab.savefig("k_opt.pdf", format="pdf")
	
	# # DEBUG: based on optimization using max_users=1000, max_k=5*max_tags
	# best_k = 59
	
	# make subgenres
	subgenres, dist = make_subgenres(movies, tag_vector, best_k)
	
	print "using %d subgenres, dist = %f:" % (len(subgenres), dist)
	for subgenre in subgenres:
		print "\t\t%s (%d)" % (subgenre.get_name(), subgenre.count)
	
	# run experiments
	train_and_rmse = []
	
	train_fracs = [x/10.0 for x in range(1,11)]
	for train_frac in train_fracs:
		rmse = test_accuracy(movies, tag_vector, user_ratings, subgenres, train_frac)
		train_pct = 100*train_frac
		train_and_rmse.append((train_pct, rmse))
	
	pylab.plot([d[0] for d in train_and_rmse], [d[1] for d in train_and_rmse])
	pylab.title("RMSE with training")
	pylab.xlabel("% training")
	pylab.ylabel("RMSE")
	pylab.savefig("train_rmse.pdf", format="pdf")
	
	print train_and_rmse



#MARK: start

if __name__ == "__main__":
	try:
		exit(main())
	except KeyboardInterrupt:
		# Ctrl-C during a long run: exit with a nonzero status instead of
		# dumping a KeyboardInterrupt traceback
		print "user canceled"
		exit(1)
