from collections import defaultdict, Counter
import gzip, json, os
from math import sqrt, log, log10, pow

'''
import MySQLdb
from MySQLdb import cursors

print "connecting to database"
conn = MySQLdb.connect(host="mysql.stalidis.com", user="ict4growth", passwd="stanford", db="ict4growth")
reader = conn.cursor(cursors.SSCursor)
print "connected"
reader.execute("""SELECT Reviews.UserID, Reviews.ProductID, Reviews.Stars FROM Reviews JOIN Users ON Reviews.UserID = Users.UserID WHERE Users.Votes > 800""")
print "executed select statement"
orfile = gzip.open("ALL_Ratings.json.gz",'w')
RATINGS = Counter()
print "reading data from database"
for (user,product,stars) in reader:
	orfile.write(json.dumps((user,product,stars))+"\n")
	RATINGS[(user,product)] = stars
print "loaded data in memory and kept a local backup"
orfile.close()
#reader.close()
conn.close()
'''
print "reading data from local copy"
RATINGS = Counter()
orfile = gzip.open("ALL_Ratings.json.gz",'r')
buf = orfile.readline()
while buf != "":
	temp = json.loads(buf)
	RATINGS[(temp[0],temp[1])] = int(float(temp[2]))
	buf = orfile.readline()
orfile.close()

# Iteratively densify the sparse ratings matrix: on each pass, remove the 100
# products with the fewest distinct raters and the 10 users who rated the
# fewest distinct products, until fewer than 500 products or 65 users remain.
# NOTE(review): the trim happens BEFORE the size check, so the final matrix
# can end up below both thresholds.
iteration = 0
stop = False
print "trying to make sparse matrix denser"
while not stop:
	iteration += 1
	print "Iteration: ", iteration
	# PROD_count[p][u] = number of ratings user u gave product p;
	# PROD[p] = number of distinct users who rated product p.
	PROD_count = defaultdict(Counter)
	PROD = Counter()
	for (u,p) in RATINGS.keys(): PROD_count[p][u] += 1
	for product in PROD_count:
		PROD[product] = len(PROD_count[product])
	# most_common() sorts descending, so the slice [-100:] is the 100
	# least-rated products; zero them and every rating they received.
	for (product,_) in PROD.most_common()[-100:]:
		PROD[product] = 0
		for user in PROD_count[product]:
			RATINGS[(user, product)] = 0
	# Adding an empty Counter drops all entries with count <= 0, i.e. it
	# physically deletes the products zeroed just above.
	PROD += Counter()
	# Same procedure from the user side: USER[u] = number of distinct
	# products user u rated; trim the 10 least-active users.
	USER_count = defaultdict(Counter)
	USER = Counter()
	for (u,p) in RATINGS.keys(): USER_count[u][p] += 1
	for user in USER_count:
		USER[user] = len(USER_count[user])
	for (user,_) in USER.most_common()[-10:]:
		USER[user] = 0
		for product in USER_count[user]:
			RATINGS[(user, product)] = 0
	USER += Counter()
	# Purge the zeroed ratings themselves via the same Counter trick.
	RATINGS += Counter()
	if (len(PROD) < 500) or (len(USER) < 65):
		stop = True

print "keeping ", len(RATINGS.keys()), " scorings from ", len(USER), " users about ", len(PROD), " books"
PROD = len(PROD)
USER = 0
USER_count = 0
PROD_count = defaultdict(Counter)
print "creating backup"
orfile = gzip.open("Ratings.json.gz",'w')
for (user,product) in RATINGS.keys():
	PROD_count[product][user] = RATINGS[(user,product)]
	orfile.write(json.dumps((user,product,RATINGS[(user,product)]))+"\n")
orfile.close()
RATINGS = 0
print "all backed up, moving to next step"

#-------------------------------------------------------------------

def filename_of(doc_id):
	"""Return the path of the gzipped term-frequency file for *doc_id*."""
	return "TFMATRIX/{0}.json.gz".format(doc_id)

print "loading IDF matrix"
IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))
#VOCABULARY = sorted(IDF.keys())

outfile = gzip.open("Weights.json.gz",'w')
scorefile = gzip.open("Scores.json.gz",'w')

CORPUS = sorted(set([w.split(".")[0] for w in os.listdir("TFMATRIX")]) & set(PROD_count.keys()))

print "creating the two matrixes needed for rocchio"
outfile.write(json.dumps(CORPUS)+'\n')
scorefile.write(json.dumps(CORPUS)+'\n')
iterator = 0
for (filename,product) in [(filename_of(document),document) for document in CORPUS]:
	iterator += 1
	print "working on product: ", iterator ," out of ", PROD
	FREQ = Counter(json.loads(gzip.open(filename,'r').readline()))
	TF = Counter()
	max_freq = float(max(FREQ.values()))
	const = float(sqrt(sum([pow((TF[w]*IDF[w]),2) for w in FREQ])))
	#for word in VOCABULARY: # this will create vectors!
	for word in FREQ: # only words in document
		TF[word] = FREQ[word] / max_freq
		WEIGHT[word] = ((TF[word]*IDF[word])/const)
	outfile.write(json.dumps(WEIGHT)+'\n')
	scorefile.write(json.dumps(PROD_count[product])+'\n')
outfile.close()
scorefile.close()










