from collections import defaultdict, Counter
import gzip, json, os
from math import sqrt, log, log10, pow


"""

print "reading data from local copy"
RATINGS = Counter()
BOOKS_WITH_REVIEWS = set()
orfile = gzip.open("ALL_Ratings.json.gz",'r')
buf = orfile.readline()
while buf != "":
	temp = json.loads(buf)
	RATINGS[(temp[0],temp[1])] = int(float(temp[2]))
	BOOKS_WITH_REVIEWS.add(temp[1])
	buf = orfile.readline()
orfile.close()

print "cleaning books without descriptions"
BOOKS_WITH_DESCRIPTION = set([filename.split(".")[0] for filename in os.listdir("TFMATRIX")])
REVIEWED_BOOKS_WITH_DESCRIPTION = BOOKS_WITH_DESCRIPTION & BOOKS_WITH_REVIEWS
BOOKS_TO_REMOVE = BOOKS_WITH_REVIEWS - REVIEWED_BOOKS_WITH_DESCRIPTION
CORPUS = BOOKS_WITH_REVIEWS - BOOKS_TO_REMOVE

PROD_count = defaultdict(Counter)
PROD = Counter()
for (u,p) in RATINGS.keys():
	PROD_count[p][u] += 1
for product in PROD_count:
	PROD[product] = len(PROD_count[product])
for product in BOOKS_TO_REMOVE:
	PROD[product] = 0
	for user in PROD_count[product]:
		RATINGS[(user, product)] = 0

iteration = 0
stop = False
print "trying to make sparse matrix denser"
while not stop:
	iteration += 1
	PROD_count = defaultdict(Counter)
	PROD = Counter()
	USERS = set()
	for (u,p) in RATINGS.keys():
		PROD_count[p][u] += 1
		USERS.add(u)
	for product in PROD_count:
		PROD[product] = len(PROD_count[product])
	for (product,_) in PROD.most_common()[-100:]:
		PROD[product] = 0
		for user in PROD_count[product]:
			RATINGS[(user, product)] = 0
			if user in USERS: USERS.remove(user)
	PROD += Counter()
	RATINGS += Counter()
	print "Iteration: ", iteration, " MIN reviews: ", min(PROD.values())
	if (len(PROD) < 500) or (len(USERS) < 65):
		stop = True

NUM_OF_BOOKS = len(PROD)
PROD = PROD_count = 0
USERS = sorted(USERS)

SCORES = defaultdict(Counter)
print "creating backup"
orfile = gzip.open("Ratings.json.gz",'w')
for (user,product) in RATINGS.keys():
	SCORES[product][user] = RATINGS[(user,product)]
	orfile.write(json.dumps((user,product,RATINGS[(user,product)]))+"\n")
orfile.close()
RATINGS = 0
print "all backed up, moving to next step"

BOOKS = sorted(SCORES.keys())
#CORPUS = sorted(["TFMATRIX/"+str(w)+".json.gz" for w in BOOKS])

print "creating the user-book matrix needed for rocchio"
scorefile = gzip.open("Scores.json.gz",'w') # each line has a book and which users have scored it
scorefile.write(json.dumps(BOOKS)+'\n')
scorevec = gzip.open("ScoreVectors.json.gz",'w') # each line has a book and which users have scored it
scorevec.write(json.dumps(BOOKS)+'\n')
scorevec.write(json.dumps(USERS)+'\n')
for product in BOOKS:
	vector = []
	for user in USERS:
		vector.append(SCORES[product][user])
	scorefile.write(json.dumps(SCORES[product])+'\n')
	scorevec.write(json.dumps(vector)+'\n')
scorefile.close()
scorevec.close()
"""

#---------------------------------------------------
scorefile = gzip.open("Scores.json.gz",'r')
BOOKS = Counter(json.loads(scorefile.readline()))
scorefile.close()
print len(BOOKS)

print "recalculating tfs"
iterator = 0
IDF = Counter()
for book in BOOKS:
	iterator += 1
	print "iteration ", iterator
	filename = "TFMATRIX/"+str(book)+".json.gz"
	FREQ = Counter(json.loads(gzip.open(filename,'r').readline()))
	MIN = min(FREQ.values())
	for word in FREQ:
		FREQ[word] = FREQ[word] / MIN
	for word in FREQ:
		if not all([w.isalpha() for w in word.split("-")]):
			FREQ[word] = 0
	FREQ += Counter()
	TF = Counter()
	S = sum(FREQ.values())
	M = max(FREQ.values())
	for word in FREQ:
		TF[word] = ( FREQ[word] / S ) / ( M / S )
		IDF[word] += 1
	filename = "BOOK_TF/"+str(book)+".json.gz"
	gzip.open(filename,'w').write(json.dumps(TF))

print "calculating idfs"
N = float(len(BOOKS))
for word in IDF:
	IDF[word] = N / float(IDF[word])

gzip.open("IDFmatrix.json.gz",'w').write(json.dumps(IDF))
#IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))


# Persist the book-id mapping (plain JSON, uncompressed) for later lookups.
with open("bookids.json",'w') as bookids:
	bookids.write(json.dumps(BOOKS))

print "calculating weights"
outfile = gzip.open("Weights.json.gz",'w')
iterator = 0
for book in BOOKS:
	WEIGHT = Counter()
	vector = []
	iterator += 1
	print "iteration ", iterator
	filename = "BOOK_TF/"+str(book)+".json.gz"
	TF = Counter(json.loads(gzip.open(filename,'r').readline()))
	const = sqrt(sum( [pow(TF[w],2)*log10(pow(IDF[w],2)) for w in TF] ))
	for word in TF:
		WEIGHT[word] = (TF[word] * log10(IDF[word])) / const
	for word in IDF:
		vector.append(WEIGHTS[word])
	outfile.write(json.dumps(vector)+'\n')



outfile.close()
"""
WEIGHTS = defaultdict(Counter)
outfile = gzip.open("Weights.json.gz",'r')
buf = outfile.readline()
BOOKS = json.loads(buf)
buf = outfile.readline()
iterator = 0
while buf != "":
	WEIGHTS[BOOKS[iterator]] = Counter(json.loads(buf))
	buf = outfile.readline()
	iterator += 1

outfile.close()


IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))
IDF = sorted(IDF.keys())

infile = gzip.open("Weights.json.gz",'r')
outfile = gzip.open("WeightVectors.json.gz",'w')
buf = infile.readline()
outfile.write(buf)
outfile.write(json.dumps(IDF)+'\n')
BOOKS = json.loads(buf)
buf = infile.readline()
iterator = 0
while buf != "":
	WEIGHTS = Counter(json.loads(buf))
	vector = []
	for word in IDF:
		vector.append(WEIGHTS[word])
	outfile.write(json.dumps(vector)+'\n')
	buf = infile.readline()
	iterator += 1

outfile.close()
infile.close()
"""










