from collections import defaultdict, Counter
import gzip, json, os, csv, hashlib, math, sys
from math import sqrt, log, log10, pow

def get_cosine(vec1, vec2):
	"""Cosine similarity between two sparse term->weight mappings.

	Only terms present in both vectors contribute to the dot product.
	Returns 0.0 when either vector has zero magnitude (e.g. is empty).
	"""
	shared = set(vec1) & set(vec2)
	dot = sum(vec1[term] * vec2[term] for term in shared)
	norm1 = math.sqrt(sum(v * v for v in vec1.values()))
	norm2 = math.sqrt(sum(v * v for v in vec2.values()))
	magnitude = norm1 * norm2
	if magnitude:
		return float(dot) / magnitude
	return 0.0


def merge(vec1, vec2):
	"""Return a Counter averaging vec1 and vec2 term-by-term.

	A term missing from one vector counts as 0 there (Counter's default),
	so each result entry is (vec1[t] + vec2[t]) / 2.0 over the union of
	terms.  Uses set-union of the mappings instead of concatenating
	.keys() lists, which only works on Python 2; the union form behaves
	identically there and also tolerates dict-view keys.
	"""
	result = Counter()
	for term in set(vec1) | set(vec2):
		result[term] = float(vec1[term] + vec2[term]) / 2
	return result

# Number of most-reviewed books to keep in the densification step below.
# BUGFIX: cast argv to int -- PROD.most_common(PARAM) needs a number, and
# the raw argv string silently misbehaves under Python 2's mixed-type
# comparisons.  Catch only the two expected failures (argument missing or
# not numeric) instead of a bare except, and fall back to 300.
try:
	PARAM = int(sys.argv[1])
except (IndexError, ValueError):
	PARAM = 300

# Load the full ratings dump.  Each gzipped line is one JSON triple; from
# the usage below, element 0 is the user id, element 1 the book id and
# element 2 the rating (arrives as a string like "4.0", hence the
# int(float(...)) round-trip).
print "reading data from local copy"
RATINGS = Counter()		# (user_id, book_id) -> integer rating
BOOKS_WITH_REVIEWS = set()	# every book id seen in the ratings file
orfile = gzip.open("ALL_Ratings.json.gz",'r')
buf = orfile.readline()
while buf != "":
	temp = json.loads(buf)
	RATINGS[(temp[0],temp[1])] = int(float(temp[2]))
	BOOKS_WITH_REVIEWS.add(temp[1])
	buf = orfile.readline()

orfile.close()

# Keep only books that have a description: a TFMATRIX/<book_id>.json.gz
# file must exist for the content-based (tf-idf) steps later on.
print "cleaning books without descriptions"
BOOKS_WITH_DESCRIPTION = set([filename.split(".")[0] for filename in os.listdir("TFMATRIX")])
REVIEWED_BOOKS_WITH_DESCRIPTION = BOOKS_WITH_DESCRIPTION & BOOKS_WITH_REVIEWS
BOOKS_TO_REMOVE = BOOKS_WITH_REVIEWS - REVIEWED_BOOKS_WITH_DESCRIPTION
CORPUS = BOOKS_WITH_REVIEWS - BOOKS_TO_REMOVE

# Count distinct reviewers per book, then ZERO (not delete) every rating
# of a description-less book.  The zero entries keep their keys in RATINGS
# until the `RATINGS += Counter()` compaction further down drops them.
PROD_count = defaultdict(Counter)	# book_id -> Counter(user_id -> #ratings)
PROD = Counter()			# book_id -> number of distinct reviewers
for (u,p) in RATINGS.keys():
	PROD_count[p][u] += 1

for product in PROD_count:
	PROD[product] = len(PROD_count[product])

for product in BOOKS_TO_REMOVE:
	PROD[product] = 0
	for user in PROD_count[product]:
		RATINGS[(user, product)] = 0

#------------------------------------------------------------------------------------------------

print "trying to make sparse matrix denser"
print "keeping 300 most reviewed books"

# Recount reviewers per book.  NOTE(review): ratings zeroed above still
# have their keys in RATINGS at this point, so removed books are counted
# again here and can still claim slots in the top-PARAM cut below --
# confirm this is intended.
PROD_count = defaultdict(Counter)
for (u,p) in RATINGS.keys():
	PROD_count[p][u] += 1

PROD = Counter()
for product in PROD_count:
	PROD[product] = len(PROD_count[product])

# Keep only the PARAM most-reviewed books (300 by default -- the print
# above hard-codes "300" even when a different argv value was given);
# zero the ratings of every other corpus book.
COMMON = set([c for (c,_) in PROD.most_common(PARAM)])

for product in (CORPUS - COMMON):
	PROD[product] = 0
	for user in PROD_count[product]:
		RATINGS[(user, product)] = 0

print "removing users with less than 5 reviews"
# Count reviewed books per user.  NOTE(review): zeroed ratings still have
# keys in RATINGS, so these counts include ratings already discarded by
# the earlier passes -- a user may survive the <5 cut on the strength of
# zeroed reviews; confirm this is intended.
USER_count = defaultdict(Counter)
for (u,p) in RATINGS.keys():
	USER_count[u][p] += 1

USERS = Counter()	# user_id -> number of distinct books rated
for user in USER_count:
	USERS[user] = len(USER_count[user])

LEAST = set([u for u in USERS if USERS[u] < 5])

# Zero every rating belonging to a low-activity user.
for user in LEAST:
	for product in USER_count[user].keys():
		RATINGS[(user, product)] = 0



# Drop the large intermediate count tables to free memory.
USER_count = PROD_count = 0

#------------------------------------------------------------------------------------------------

# Counter addition keeps only positive entries, so adding an empty
# Counter is what finally deletes every rating zeroed by the filtering
# passes above.
RATINGS += Counter()

USERS = Counter()		# rebuilt: user_id -> number of surviving ratings
PROD = Counter()		# rebuilt: book_id -> number of surviving ratings
SCORES = defaultdict(Counter)	# user_id -> Counter(book_id -> rating)

# Write the cleaned ratings back out (one JSON triple per line) while
# simultaneously building the per-user score vectors and the counts.
print "creating backup"
orfile = gzip.open("Ratings.json.gz",'w')
for (user,product) in RATINGS.keys():
	SCORES[user][product] = RATINGS[(user,product)]
	orfile.write(json.dumps((user,product,RATINGS[(user,product)]))+"\n")
	USERS[user] += 1
	PROD[product] += 1

orfile.close()
# From here on USERS and BOOKS are sorted id LISTS, no longer Counters;
# they define the row/column ordering of every matrix written below.
USERS = sorted(USERS.keys())
BOOKS = sorted(PROD.keys())
NUM_OF_BOOKS = len(BOOKS)
NUM_OF_USERS = len(USERS)
print "Users: ", NUM_OF_USERS, " and Books: ", NUM_OF_BOOKS


RATINGS = 0	# release the flat ratings table; SCORES now holds the data
print "all backed up, moving to next step"

# Persist the orderings so downstream consumers can map rows/columns back
# to ids.
bookids = open("bookids.json",'w')
bookids.write(json.dumps(BOOKS))
bookids.close()

userids = open("userids.json",'w')
userids.write(json.dumps(USERS))
userids.close()

#------------------------------------------------------------------------------------------------

print "creating the user-book matrix needed for rocchio"
# One tab-separated row per user, one column per book (sorted BOOKS order).
alternate = open("scorevectors.csv",'w')	# one row per user, columns follow BOOKS
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for user in USERS:
	vector = []
	for product in BOOKS:
		vector.append(SCORES[user][product])
	fwriter.writerow(vector)
alternate.close()

#------------------------------------------------------------------------------------------------

print "calculating user clusters"
# Single-pass greedy clustering of users by cosine similarity of their
# rating vectors.  Cluster ids are the md5 hex digest of the seed user's
# id; each later user joins the best-matching cluster or seeds a new one.
USER_CLUSTERS = defaultdict(list)	# cluster_id -> [user_id, ...]
USER_CENTROIDS = defaultdict(Counter)	# cluster_id -> centroid rating vector

# Seed the first cluster with the first user.
USER_CLUSTERS[hashlib.md5(USERS[0]).hexdigest()].append(USERS[0])
USER_CENTROIDS[hashlib.md5(USERS[0]).hexdigest()] = SCORES[USERS[0]]

for user in USERS[1:]:	# iterate through users
	temp = Counter()
	for centroid in USER_CENTROIDS:	# check user against all clusters
		temp[centroid]  = get_cosine(USER_CENTROIDS[centroid],SCORES[user])
	(c,m) = temp.most_common(1)[0]	# find the cluster with the maximum similarity
	if m > 0.75 :			# if similarity is more than 0.75 add user to the cluster
		USER_CLUSTERS[c].append(user)
		# NOTE: merge() blends centroid and newcomer 50/50, so this is a
		# running average, not a true mean over all cluster members.
		USER_CENTROIDS[c] = merge(USER_CENTROIDS[c],SCORES[user])
	else:				# else create new cluster
		USER_CLUSTERS[hashlib.md5(user).hexdigest()].append(user)
		USER_CENTROIDS[hashlib.md5(user).hexdigest()] = SCORES[user]

SCORES = defaultdict(Counter)	# reset; reused below for book weight vectors

clfile = gzip.open("USER_CLUSTERS.json.gz",'w')
clfile.write(json.dumps(USER_CLUSTERS)+"\n")
clfile.close()
clfile = gzip.open("USER_CENTROIDS.json.gz",'w')
clfile.write(json.dumps(USER_CENTROIDS)+"\n")
clfile.close()

#------------------------------------------------------------------------------------------------

print "creating the user-book matrix needed for rocchio"

# One tab-separated row per user-cluster centroid, columns in BOOKS order.
# NOTE(review): dict iteration order is arbitrary here, so the row order
# is not reproducible across runs -- confirm downstream doesn't rely on it.
alternate = open("clusterscores.csv",'w')	# one row per cluster centroid
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for cluster in USER_CENTROIDS:
	vector = []
	for product in BOOKS:
		vector.append(USER_CENTROIDS[cluster][product])
	fwriter.writerow(vector)
alternate.close()

"""
USERID_CLUSTER = dict()
	for clust in CLUSTERS:
		for user in CLUSTERS[clust]:
			USERID_CLUSTER[user] = clust


USER_CLUSTER_keys = CENTROIDS.keys()
outer = []
for vec1 in USER_CLUSTER_keys:
	inner = []
	for vec2 in USER_CLUSTER_keys:
		inner.append(get_cosine(CENTROIDS[vec1],CENTROIDS[vec2]))
	outer.append(inner)


#[len(CLUSTERS[c]) for c in CLUSTERS]

#DOCS -> [(userid,Counter)]
#SCORES -> {userid:Counter}
"""
#------------------------------------------------------------------------------------------------

print "recalculating tfs"
iterator = 0
IDF = Counter()

for book in BOOKS:
	iterator += 1
	print "iteration ", iterator
	filename = "TFMATRIX/"+str(book)+".json.gz"
	FREQ = Counter(json.loads(gzip.open(filename,'r').readline()))
	if len(FREQ) == 0:
		os.remove(filename)
	else:
		MIN = min(FREQ.values())
		for word in FREQ:
			FREQ[word] = FREQ[word] / MIN
	for word in FREQ:
		if not all([w.isalpha() for w in word.split("-")]):
			FREQ[word] = 0
	FREQ += Counter()
	TF = Counter()
	S = sum(FREQ.values())
	if len(FREQ) == 0:
		M = 0
	else:
		M = max(FREQ.values())
	for word in FREQ:
		TF[word] = ( FREQ[word] / S ) / ( M / S )
		IDF[word] += 1
	filename = "BOOK_TF/"+str(book)+".json.gz"
	gzip.open(filename,'w').write(json.dumps(TF))



print "calculating idfs"
# Convert document frequencies to inverse document frequencies.
# NOTE(review): this stores the raw ratio N/df; the log10 of the classic
# idf definition is applied later, at weight-computation time.
TERMS = sorted(IDF.keys())	# global term ordering for all matrices below
N = float(len(BOOKS))
for word in TERMS:
	IDF[word] = N / float(IDF[word])

gzip.open("IDFmatrix.json.gz",'w').write(json.dumps(IDF))
#IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))


# Persist the term ordering so matrix columns can be mapped back to terms.
terms = open("terms.json",'w')
terms.write(json.dumps(TERMS))
terms.close()

#------------------------------------------------------------------------------------------------

print "calculating weights"
#outfile = gzip.open("Weights.json.gz",'w')

weightfile = open("weightvectors.csv",'w')
weightwriter = csv.writer(weightfile, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
freqfile = open("frequencies.csv",'w')
freqwriter = csv.writer(freqfile, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
iterator = 0
for book in BOOKS:
	WEIGHT = Counter()
	vector = []
	freqtor = []
	iterator += 1
	print "iteration ", iterator
	filename = "BOOK_TF/"+str(book)+".json.gz"
	TF = Counter(json.loads(gzip.open(filename,'r').readline()))
	const = sqrt(sum( [pow(TF[w],2)*log10(pow(IDF[w],2)) for w in TF] ))
	for word in TF:
		WEIGHT[word] = (TF[word] * log10(IDF[word])) / const
	SCORES[book] = WEIGHT
	for word in TERMS:
		vector.append(WEIGHT[word])
		freqtor.append(TF[word])
	#outfile.write(json.dumps(vector)+'\n')
	weightwriter.writerow(vector)
	freqwriter.writerow(freqtor)

#outfile.close()
weightfile.close()
freqfile.close()

#------------------------------------------------------------------------------------------------
"""SCORES = defaultdict(Counter)
BOOKS = [f.split(".")[0] for f in os.listdir("BOOK_TF/")]
for book in BOOKS:
	SCORES[book] = Counter(json.loads(gzip.open("BOOK_TF/"+book+".json.gz",'r').readline()))
	if len(SCORES[book]) == 0:
		del SCORES[book]
		del BOOKS[book]
		os.remove("BOOK_TF/"+book+".json.gz")
"""
print "clustering books"

# Single-pass greedy clustering of books by cosine similarity of their
# tf-idf weight vectors -- same scheme as the user clustering above, but
# with a stricter 0.9 threshold.  Cluster ids are md5 digests of the
# seed book's id.
PROD_CLUSTERS = defaultdict(list)	# cluster_id -> [book_id, ...]
PROD_CENTROIDS = defaultdict(Counter)	# cluster_id -> centroid term vector

# Seed the first cluster with the first book.
PROD_CLUSTERS[hashlib.md5(BOOKS[0]).hexdigest()].append(BOOKS[0])
PROD_CENTROIDS[hashlib.md5(BOOKS[0]).hexdigest()] = SCORES[BOOKS[0]]

for book in BOOKS[1:]:	# iterate through books
	temp = Counter()
	for centroid in PROD_CENTROIDS:	# check book against all clusters
		temp[centroid]  = get_cosine(PROD_CENTROIDS[centroid],SCORES[book])
	(c,m) = temp.most_common(1)[0]	# find the cluster with the maximum similarity
	if m > 0.9 :			# if similarity is more than 0.9 add the book to the cluster
		PROD_CLUSTERS[c].append(book)
		# NOTE: merge() blends centroid and newcomer 50/50, not a true mean.
		PROD_CENTROIDS[c] = merge(PROD_CENTROIDS[c],SCORES[book])
	else:				# else create new cluster
		PROD_CLUSTERS[hashlib.md5(book).hexdigest()].append(book)
		PROD_CENTROIDS[hashlib.md5(book).hexdigest()] = SCORES[book]

# Persist the book clusters and centroids, then dump the centroid-by-term
# matrix (columns in TERMS order) for the Rocchio step.
clfile = gzip.open("PROD_CLUSTERS.json.gz",'w')
clfile.write(json.dumps(PROD_CLUSTERS)+"\n")
clfile.close()
clfile = gzip.open("PROD_CENTROIDS.json.gz",'w')
clfile.write(json.dumps(PROD_CENTROIDS)+"\n")
clfile.close()

print "creating the book-terms matrix needed for rocchio"

# NOTE(review): dict iteration order is arbitrary, so row order is not
# reproducible across runs -- confirm downstream doesn't rely on it.
alternate = open("clusterbooks.csv",'w')	# one row per book-cluster centroid
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for cluster in PROD_CENTROIDS:
	vector = []
	for word in TERMS:
		vector.append(PROD_CENTROIDS[cluster][word])
	fwriter.writerow(vector)
alternate.close()

#CLUSTERS = json.loads(gzip.open("USER_CLUSTERS.json.gz",'r').readline())
#[len(CLUSTERS[k]) for k in CLUSTERS.keys() if len(CLUSTERS[k]) > 1]


