from collections import defaultdict, Counter
import gzip, json, os, csv, hashlib, math
from math import sqrt, log, log10, pow

def get_cosine(vec1, vec2):
	"""Return the cosine similarity of two sparse vectors.

	vec1 and vec2 are mappings from term -> weight (dict or Counter).
	Returns 0.0 when either vector has zero magnitude (including empty
	vectors), avoiding a division by zero.
	"""
	shared = set(vec1) & set(vec2)
	numerator = sum(vec1[term] * vec2[term] for term in shared)
	# Euclidean norms; generator expressions avoid building throwaway lists.
	norm1 = math.sqrt(sum(v ** 2 for v in vec1.values()))
	norm2 = math.sqrt(sum(v ** 2 for v in vec2.values()))
	denominator = norm1 * norm2
	if not denominator:
		return 0.0
	return float(numerator) / denominator


def merge(vec1, vec2):
	"""Return a Counter averaging vec1 and vec2 term by term.

	A term missing from one vector counts as 0, so it ends up with half
	its weight.  Uses set union over the key views (the original
	concatenated dict.keys() with +, which is a TypeError on Python 3)
	and .get() defaults so plain dicts work as well as Counters.
	"""
	result = Counter()
	for term in set(vec1) | set(vec2):
		result[term] = float(vec1.get(term, 0) + vec2.get(term, 0)) / 2
	return result


print("Load matrices to memory")

# Book id list and per-term IDF weights: one JSON document on the first
# line of each file.  Context managers close the handles (the original
# leaked both).
with open("bookids.json", 'r') as f:
	BOOKS = json.loads(f.readline())
with gzip.open("IDFmatrix.json.gz", 'r') as f:
	IDF = Counter(json.loads(f.readline()))

# Centroid files: first line is the JSON list of cluster ids; each
# following line is that cluster's centroid vector, in the same order.
USER_CENTROIDS = defaultdict(Counter)
with gzip.open("USER_CENTROIDS.json.gz", 'r') as centrfile:
	USER_CLUSTERS = json.loads(centrfile.readline())
	for cluster in USER_CLUSTERS:
		USER_CENTROIDS[cluster] = Counter(json.loads(centrfile.readline()))

PROD_CENTROIDS = defaultdict(Counter)
with gzip.open("PROD_CENTROIDS.json.gz", 'r') as centrfile:
	PROD_CLUSTERS = json.loads(centrfile.readline())
	for cluster in PROD_CLUSTERS:
		PROD_CENTROIDS[cluster] = Counter(json.loads(centrfile.readline()))

# clusterscores.csv: one tab-separated row per book cluster (in
# PROD_CLUSTERS order); columns follow USER_CLUSTERS order.  Scores are
# converted to float — the original stored the raw CSV strings, so
# Counter.most_common ranked them lexicographically instead of
# numerically.
BOOK_USER_SCORES = defaultdict(Counter)
with open("clusterscores.csv", 'r') as alternate:
	freader = csv.reader(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
	for book_cluster in PROD_CLUSTERS:
		row = next(freader)  # next() builtin works on Python 2 and 3; reader.next() is 2-only
		for user_cluster, score in zip(USER_CLUSTERS, row):
			BOOK_USER_SCORES[book_cluster][user_cluster] = float(score)


# TEST_USERS.json: first line is the JSON list of user ids, then one
# score vector per user in the same order.  The with-block guarantees
# the handle is closed even if a line fails to parse.
SCORES = defaultdict(Counter)
with open("TEST_USERS.json", 'r') as target:
	TEST_USERS = json.loads(target.readline())
	for user in TEST_USERS:
		SCORES[user] = Counter(json.loads(target.readline()))

def predict(user, book, SMTH):
	"""Predict the cluster-pair score for a (user vector, book vector).

	Assigns each vector to its nearest centroid by cosine similarity and
	returns SMTH[book_cluster][user_cluster].  max(..., key=...) replaces
	the original throwaway Counter + most_common(1): both pick the first
	cluster (in list order) with the highest similarity.
	"""
	user_cluster = max(USER_CLUSTERS, key=lambda c: get_cosine(USER_CENTROIDS[c], user))
	book_cluster = max(PROD_CLUSTERS, key=lambda c: get_cosine(PROD_CENTROIDS[c], book))
	return SMTH[book_cluster][user_cluster]

def recommend(user, collection, SMTH):
	"""Return the ids of the 5 best-scoring books in collection for a user.

	collection is an iterable of (book_id, vector) pairs.  The user is
	assigned once to its nearest user centroid; each book is assigned to
	its nearest product centroid and scored via SMTH, and the five
	highest-scoring book ids are returned.

	max(..., key=...) replaces the per-book throwaway Counter (and the
	dead `temp = similarity = 0` reset) while keeping the same
	first-best-cluster tie-breaking.
	"""
	user_cluster = max(USER_CLUSTERS, key=lambda c: get_cosine(USER_CENTROIDS[c], user))
	Ratings = Counter()
	for (book, vector) in collection:
		book_cluster = max(PROD_CLUSTERS, key=lambda c: get_cosine(PROD_CENTROIDS[c], vector))
		Ratings[book] = SMTH[book_cluster][user_cluster]
	return [x for (x, _) in Ratings.most_common(5)]


# Pre-load every book vector once (one gzipped JSON document per book).
BOOKVECTORS = []
for book in BOOKS:
	filename = "BOOK_VECTORS/" + str(book) + ".json.gz"
	# with-block closes each archive; the original leaked one open
	# handle per book.
	with gzip.open(filename, 'r') as fh:
		BOOKVECTORS.append((book, json.loads(fh.readline())))

# Emit the top-5 recommendation list for every test user.
for user in TEST_USERS:
	print(recommend(SCORES[user], BOOKVECTORS, BOOK_USER_SCORES))



