from collections import defaultdict, Counter
import gzip, json, os, csv, hashlib, math, random, sys
from math import sqrt, log, log10, pow


# Number of most-reviewed books to keep (see the densifying step below).
# Optional first CLI argument; falls back to 300 when the argument is
# missing or not an integer.  The original bare `except:` also swallowed
# SystemExit/KeyboardInterrupt -- narrowed to the two real failure modes.
try:
    BOOK_LIMIT = int(sys.argv[1])
except (IndexError, ValueError):
    BOOK_LIMIT = 300


def get_cosine(vec1, vec2):
    """Cosine similarity between two sparse vectors (mappings term -> weight).

    Only terms present in both vectors contribute to the dot product;
    returns 0.0 when either vector has zero magnitude (e.g. is empty).
    """
    shared_terms = set(vec1) & set(vec2)
    dot = sum(vec1[t] * vec2[t] for t in shared_terms)
    norm1 = math.sqrt(sum(v * v for v in vec1.values()))
    norm2 = math.sqrt(sum(v * v for v in vec2.values()))
    denom = norm1 * norm2
    if not denom:
        return 0.0
    return float(dot) / denom


def merge(vec1, vec2):
    """Element-wise average of two sparse Counter vectors.

    A term missing from one vector counts as 0 there, so a term present in
    only one input contributes half its weight.  Uses a set union of the
    keys: the original `vec1.keys() + vec2.keys()` built two throwaway
    lists and breaks on Python 3, where .keys() returns a view.
    """
    result = Counter()
    for term in set(vec1) | set(vec2):
        result[term] = float(vec1[term] + vec2[term]) / 2
    return result

print "reading data from local copy"
# RATINGS maps (user_id, book_id) -> integer rating.
RATINGS = Counter()
BOOKS_WITH_REVIEWS = set()
# Each line of the gzip archive is one JSON triple: [user, book, rating].
orfile = gzip.open("ALL_Ratings.json.gz",'r')
buf = orfile.readline()
while buf != "":
    temp = json.loads(buf)
    # Ratings may be serialized as floats (e.g. "4.0"); normalize to int.
    RATINGS[(temp[0],temp[1])] = int(float(temp[2]))
    BOOKS_WITH_REVIEWS.add(temp[1])
    buf = orfile.readline()

orfile.close()

print "cleaning books without descriptions"
# A book "has a description" iff a TFMATRIX/<book_id>.json.gz file exists.
BOOKS_WITH_DESCRIPTION = set([filename.split(".")[0] for filename in os.listdir("TFMATRIX")])
REVIEWED_BOOKS_WITH_DESCRIPTION = BOOKS_WITH_DESCRIPTION & BOOKS_WITH_REVIEWS
BOOKS_TO_REMOVE = BOOKS_WITH_REVIEWS - REVIEWED_BOOKS_WITH_DESCRIPTION
CORPUS = BOOKS_WITH_REVIEWS - BOOKS_TO_REMOVE

# PROD_count: book -> Counter(user -> number of ratings by that user).
PROD_count = defaultdict(Counter)
PROD = Counter()
for (u,p) in RATINGS.keys():
    PROD_count[p][u] += 1

# PROD: book -> number of distinct users who rated it.
for product in PROD_count:
    PROD[product] = len(PROD_count[product])

# Zero out (do not yet delete) every rating of a description-less book;
# the zeroed entries are dropped later by the `RATINGS += Counter()`
# compaction step.
for product in BOOKS_TO_REMOVE:
    PROD[product] = 0
    for user in PROD_count[product]:
        RATINGS[(user, product)] = 0

#------------------------------------------------------------------------------------------------

print "trying to make sparse matrix denser"
# NOTE(review): the message hard-codes "300" but the actual cutoff is
# BOOK_LIMIT (argv-configurable, default 300).
print "keeping 300 most reviewed books"

# Rebuild the per-book reviewer counts from the (partially zeroed) RATINGS.
PROD_count = defaultdict(Counter)
for (u,p) in RATINGS.keys():
    PROD_count[p][u] += 1

PROD = Counter()
for product in PROD_count:
    PROD[product] = len(PROD_count[product])

# The BOOK_LIMIT books with the most distinct reviewers survive.
COMMON = set([c for (c,_) in PROD.most_common(BOOK_LIMIT)])

# Zero out ratings for every other corpus book (compacted away later).
for product in (CORPUS - COMMON):
    PROD[product] = 0
    for user in PROD_count[product]:
        RATINGS[(user, product)] = 0

print "removing users with less than 5 reviews"
# USER_count: user -> Counter(book -> rating count).
# NOTE(review): RATINGS keys zeroed by the previous steps still exist here,
# so these counts include zeroed entries until the `RATINGS += Counter()`
# compaction below.
USER_count = defaultdict(Counter)
for (u,p) in RATINGS.keys():
    USER_count[u][p] += 1

# USERS: user -> number of distinct books that user rated.
USERS = Counter()
for user in USER_count:
    USERS[user] = len(USER_count[user])

LEAST = set([u for u in USERS if USERS[u] < 5])

# Zero out every rating of an under-active user (dropped at compaction).
for user in LEAST:
    for product in USER_count[user].keys():
        RATINGS[(user, product)] = 0



# Release the large intermediate maps.
USER_count = PROD_count = 0

#------------------------------------------------------------------------------------------------

# Counter `+=` keeps only positive counts, so this one statement drops
# every entry that the pruning passes above set to 0.
RATINGS += Counter()

USERS = Counter()
PROD = Counter()
# SCORES: user -> Counter(book -> rating), the per-user rating vector.
SCORES = defaultdict(Counter)

print "creating backup"
# Persist the cleaned ratings, one JSON triple per line, while also
# building SCORES and the surviving user/book tallies in the same pass.
orfile = gzip.open("Ratings.json.gz",'w')
for (user,product) in RATINGS.keys():
    SCORES[user][product] = RATINGS[(user,product)]
    orfile.write(json.dumps((user,product,RATINGS[(user,product)]))+"\n")
    USERS[user] += 1
    PROD[product] += 1

orfile.close()
# From here on USERS/BOOKS are sorted id lists, not Counters.
USERS = sorted(USERS.keys())
BOOKS = sorted(PROD.keys())
NUM_OF_BOOKS = len(BOOKS)
NUM_OF_USERS = len(USERS)
print "Users: ", NUM_OF_USERS, " and Books: ", NUM_OF_BOOKS

# Pick users for held-out evaluation.
# NOTE(review): xrange(1,20) yields 19 picks, not 20, and random.choice
# samples with replacement so duplicates are possible -- confirm intended.
TEST_USERS = []
for i in xrange(1,20):
    TEST_USERS.append(random.choice(USERS))

# First line: the list of test user ids; then one score vector per user.
target = open("TEST_USERS.json",'w')
target.write(json.dumps(TEST_USERS)+"\n")
for user in TEST_USERS:
    target.write(json.dumps(SCORES[user])+"\n")
target.close()

# Release the raw ratings map; SCORES is the working copy from here on.
RATINGS = 0
print "all backed up, moving to next step"

bookids = open("bookids.json",'w')
bookids.write(json.dumps(BOOKS))
bookids.close()

userids = open("userids.json",'w')
userids.write(json.dumps(USERS))
userids.close()

#------------------------------------------------------------------------------------------------


#------------------------------------------------------------------------------------------------

print "calculating user clusters"
USER_CLUSTERS = defaultdict(list)
USER_CENTROIDS = defaultdict(Counter)

for user in USERS:	# iterate through users
    temp = Counter()
    for centroid in USER_CENTROIDS:	# check user against all clusters
        temp[centroid]  = get_cosine(USER_CENTROIDS[centroid],SCORES[user])
    if len(temp) != 0:
        (c,m) = temp.most_common(1)[0]	# find the cluster with the maximum similarity
    else: m = 0
    if m > 0.75 :			# if similarity is more than 0.9 add user to the cluster
        USER_CLUSTERS[c].append(user)
        USER_CENTROIDS[c] = merge(USER_CENTROIDS[c],SCORES[user])
    else:				# else create new cluster
        USER_CLUSTERS[hashlib.md5(user).hexdigest()].append(user)
        USER_CENTROIDS[hashlib.md5(user).hexdigest()] = SCORES[user]

SCORES = defaultdict(Counter)
CLUSTERS = USER_CLUSTERS.keys()
clustfile = gzip.open("USER_CLUSTERS.json.gz",'w')
centrfile = gzip.open("USER_CENTROIDS.json.gz",'w')
centrfile.write(json.dumps(CLUSTERS)+"\n")
for cluster in CLUSTERS:
    clustfile.write(json.dumps(USER_CLUSTERS[cluster])+"\n")
    centrfile.write(json.dumps(USER_CENTROIDS[cluster])+"\n")
clustfile.close()
centrfile.close()

#------------------------------------------------------------------------------------------------
"""
print "creating the usercluster-book matrix needed for rocchio"

alternate = open("clusterscores.csv",'w')	# changed so that each line is a book!
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for cluster in USER_CENTROIDS:
    vector = []
    for product in BOOKS:
        vector.append(USER_CENTROIDS[cluster][product])
    fwriter.writerow(vector)
alternate.close()
"""
#------------------------------------------------------------------------------------------------

print "recalculating tfs"
iterator = 0
# IDF accumulates document frequency here: number of books containing
# each surviving term.  It is converted to N/df in the next step.
IDF = Counter()

for book in BOOKS:
    iterator += 1
    print "iteration ", iterator
    filename = "TFMATRIX/"+str(book)+".json.gz"
    # One JSON object per file: term -> raw frequency.
    FREQ = Counter(json.loads(gzip.open(filename,'r').readline()))
    if len(FREQ) == 0:
        # NOTE(review): the empty-frequency path deletes the source file
        # but does NOT skip the book -- an empty TF file is still written
        # to BOOK_TF below, and the book stays in BOOKS.  Confirm intended.
        os.remove(filename)
    else:
        # Rescale so the rarest term has frequency 1.
        # NOTE(review): under Python 2, if the JSON values are ints this
        # is truncating integer division -- verify the stored values.
        MIN = min(FREQ.values())
        for word in FREQ:
            FREQ[word] = FREQ[word] / MIN
    # Zero out tokens that are not purely alphabetic (hyphenated words
    # are allowed when every hyphen-separated part is alphabetic).
    for word in FREQ:
        if not all([w.isalpha() for w in word.split("-")]):
            FREQ[word] = 0
    # Counter compaction: drop the zeroed (non-alphabetic) terms.
    FREQ += Counter()
    TF = Counter()
    S = sum(FREQ.values())
    if len(FREQ) == 0:
        M = 0
    else:
        M = max(FREQ.values())
    for word in FREQ:
        # Augmented TF: algebraically (FREQ/S)/(M/S) == FREQ/M, i.e.
        # frequency normalized by the book's maximum term frequency.
        # NOTE(review): with Python 2 integer operands both divisions
        # truncate -- confirm FREQ values are floats at this point.
        TF[word] = ( FREQ[word] / S ) / ( M / S )
        IDF[word] += 1
    filename = "BOOK_TF/"+str(book)+".json.gz"
    gzip.open(filename,'w').write(json.dumps(TF))

print "calculating idfs"
TERMS = sorted(IDF.keys())
N = float(len(BOOKS))
# Convert document frequency df to the raw ratio N/df.  The logarithm of
# the classic idf formula is applied later, at the point of use
# (log10(IDF[w]) in the weight calculation below).
for word in TERMS:
    IDF[word] = N / float(IDF[word])

gzip.open("IDFmatrix.json.gz",'w').write(json.dumps(IDF))
# Resume point: reload a previously computed IDF matrix instead.
#IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))

terms = open("terms.json",'w')
terms.write(json.dumps(TERMS))
terms.close()

#------------------------------------------------------------------------------------------------

print "calculating weights"
print "and clustering books"
#outfile = gzip.open("Weights.json.gz",'w')
# Same single-pass leader clustering as for users, but over TF-IDF book
# vectors and with a stricter 0.9 similarity threshold.
PROD_CLUSTERS = defaultdict(list)
PROD_CENTROIDS = defaultdict(Counter)

iterator = 0
for book in BOOKS:
    Book_Vector = Counter()
    vector = []
    freqtor = []
    iterator += 1
    print "iteration ", iterator
    filename = "BOOK_TF/"+str(book)+".json.gz"
    TF = Counter(json.loads(gzip.open(filename,'r').readline()))
    # Normalization constant.
    # NOTE(review): const sums TF^2 * log10(IDF^2) = TF^2 * 2*log10(IDF),
    # while the weights below use TF * log10(IDF); a true L2 norm would
    # square the log term, so the stored vectors are not unit-length.
    # Cosine similarity is scale-invariant, so clustering is unaffected,
    # but downstream consumers of BOOK_VECTORS should confirm this.
    const = sqrt(sum( [pow(TF[w],2)*log10(pow(IDF[w],2)) for w in TF] ))
    for word in TF:
        Book_Vector[word] = (TF[word] * log10(IDF[word])) / const
    temp = Counter()
    for centroid in PROD_CENTROIDS:	# check book against all clusters
        temp[centroid]  = get_cosine(PROD_CENTROIDS[centroid],Book_Vector)
    if len(temp) != 0:
        (c,m) = temp.most_common(1)[0]	# find the cluster with the maximum similarity
    else: m = 0
    if m > 0.9 :			# if similarity is more than 0.9 add book to the cluster
        PROD_CLUSTERS[c].append(book)
        PROD_CENTROIDS[c] = merge(PROD_CENTROIDS[c],Book_Vector)
    else:				# else create new singleton cluster keyed by md5(book id)
        PROD_CLUSTERS[hashlib.md5(book).hexdigest()].append(book)
        PROD_CENTROIDS[hashlib.md5(book).hexdigest()] = Book_Vector
    filename = "BOOK_VECTORS/"+str(book)+".json.gz"
    gzip.open(filename,'w').write(json.dumps(Book_Vector))



# Persist the book clusters and centroids; the centroid file's first line
# is the list of cluster ids, in the same order as the following lines.
CLUSTERS = PROD_CLUSTERS.keys()
clustfile = gzip.open("PROD_CLUSTERS.json.gz",'w')
centrfile = gzip.open("PROD_CENTROIDS.json.gz",'w')
centrfile.write(json.dumps(CLUSTERS)+"\n")
for cluster in CLUSTERS:
    clustfile.write(json.dumps(PROD_CLUSTERS[cluster])+"\n")
    centrfile.write(json.dumps(PROD_CENTROIDS[cluster])+"\n")
clustfile.close()
centrfile.close()


#------------------------------------------------------------------------------------------------
print "creating the bookcluster-usercluster matrix needed for rocchio"

# SMTH: user_cluster -> Counter(book_cluster -> averaged centroid weight).
SMTH = defaultdict(Counter)
# Invert the clustering: book id -> id of the cluster containing it.
BOOKID_CLUSTER = dict()
for book_cluster in PROD_CLUSTERS:
    for book in PROD_CLUSTERS[book_cluster]:
        BOOKID_CLUSTER[book] = book_cluster

# Fold each user-centroid's per-book weight into its book's cluster,
# dividing by the cluster size so larger clusters are not over-weighted.
for user_cluster in USER_CENTROIDS.keys():
    for book in USER_CENTROIDS[user_cluster]:
        book_cluster = BOOKID_CLUSTER[book]
        SMTH[user_cluster][book_cluster] += (float(USER_CENTROIDS[user_cluster][book]) / float(len(PROD_CLUSTERS[book_cluster])))

# CSV layout: one row per book cluster, one column per user cluster,
# tab-delimited, no header row.
alternate = open("clusterscores.csv",'w')
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for book_cluster in PROD_CLUSTERS:
    vector = []
    for user_cluster in USER_CENTROIDS:
        vector.append(SMTH[user_cluster][book_cluster])
    fwriter.writerow(vector)
alternate.close()

#------------------------------------------------------------------------------------------------

print "creating the bookcluster-terms matrix needed for rocchio"

# NOTE(review): this OVERWRITES the PROD_CLUSTERS.json.gz written earlier,
# now ordered by PROD_CENTROIDS iteration order and without the cluster-id
# header line of the centroid file -- confirm the two orders agree.
clustfile = gzip.open("PROD_CLUSTERS.json.gz",'w')
alternate = open("clusterbooks.csv",'w')	# each line is a cluster of books! each row is a term
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Header row: every term, sorted; following rows are centroid weights.
fwriter.writerow(TERMS)
for cluster in PROD_CENTROIDS:
    clustfile.write(json.dumps(PROD_CLUSTERS[cluster])+"\n")
    vector = []
    for word in TERMS:
        vector.append(PROD_CENTROIDS[cluster][word])
    fwriter.writerow(vector)
alternate.close()
clustfile.close()
# Scratch snippets for inspecting cluster sizes after a run:
#CLUSTERS = json.loads(gzip.open("USER_CLUSTERS.json.gz",'r').readline())
#[len(CLUSTERS[k]) for k in CLUSTERS.keys() if len(CLUSTERS[k]) > 1]


