from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.base import ClusterMixin
from collections import defaultdict, Counter
import hashlib
__author__ = 'pstalidis'

def get_cosine(vec1, vec2):
    intersection = set(vec1.keys()) & set(vec2.keys())
    numerator = sum([vec1[x] * vec2[x] for x in intersection])
    sum1 = sum([vec1[x]**2 for x in vec1.keys()])
    sum2 = sum([vec2[x]**2 for x in vec2.keys()])
    denominator = math.sqrt(sum1) * math.sqrt(sum2)
    if not denominator:
        return 0.0
    else:
        return float(numerator) / denominator


def merge(vec1, vec2):
    """Return the element-wise mean of two Counter vectors.

    Missing terms count as 0, so a term present in only one vector gets
    half its weight.  Zero-valued results are kept in the output (explicit
    assignment stores them even in a Counter), matching the original.

    Fix: the original iterated ``set(vec1.keys() + vec2.keys())``, which
    relies on ``keys()`` returning a list (Python 2 only); dict views do
    not support ``+``.  Set union is equivalent and portable.
    """
    result = Counter()
    for term in set(vec1) | set(vec2):
        result[term] = float(vec1[term] + vec2[term]) / 2
    return result


class NewScriptToClass():
    """Refactor-in-progress of the pipeline using scikit-learn for TF-IDF
    vectorization and cosine similarity.

    Fixes over the original: both worker methods now RETURN their results
    (the originals computed clusters/vectors and silently discarded them),
    the 1x1 matrix returned by ``cosine_similarity`` is reduced to a
    scalar before it is ranked, and the md5 cluster id is derived from
    bytes so the code also runs on Python 3.
    """

    def __init__(self):
        self.BOOK_LIMIT = 300                     # cap on books kept downstream
        self.RATINGS = Counter()                  # (user_id, book_id) -> rating
        self.BOOKS_WITH_REVIEWS = set()           # book ids seen in the ratings data
        self.CORPUS = ""                          # placeholder, filled later
        self.SCORES = defaultdict(Counter)        # user_id -> Counter(book_id -> rating)
        self.USERS = ""                           # placeholder, filled later
        self.BOOKS = ""                           # placeholder, filled later
        self.USER_CLUSTERS = defaultdict(list)    # cluster id -> member user ids
        self.USER_CENTROIDS = defaultdict(Counter)  # cluster id -> centroid vector
        self.IDF = Counter()                      # term -> document frequency

    def new_create_book_clusters(self, Books):
        """Greedily cluster (book_id, vector) pairs by cosine similarity.

        Each book joins the most similar existing centroid when the
        similarity exceeds 0.75; otherwise it seeds a new cluster whose id
        is the md5 hex digest of the book id.

        Returns a tuple ``(BookToCluster, BCentroids)`` mapping book id ->
        cluster id and cluster id -> centroid vector.
        """
        BookToCluster = {}
        BCentroids = {}
        for (book, vector) in Books:
            # Seed with a below-threshold dummy score so most_common() is
            # never called on an empty Counter (first book -> new cluster).
            temp = Counter({book: 0.1})
            for clusterid in BCentroids:
                # cosine_similarity returns a 1x1 ndarray; take the scalar,
                # otherwise most_common() would try to order ndarrays.
                temp[clusterid] = cosine_similarity(vector, BCentroids[clusterid])[0, 0]
            (clusterid, similarity) = temp.most_common(1)[0]
            if similarity > 0.75:
                BookToCluster[book] = clusterid
                # Centroid becomes the mean of the old centroid and the member.
                BCentroids[clusterid] = (BCentroids[clusterid] + vector) / 2.0
            else:
                # New singleton cluster keyed by a stable hash of the book id.
                clusterid = hashlib.md5(str(book).encode("utf-8")).hexdigest()
                BookToCluster[book] = clusterid
                BCentroids[clusterid] = vector
        return BookToCluster, BCentroids

    def calculate_book_vectors(self, corpus):
        """Vectorize book descriptions with TfidfVectorizer.

        ``corpus`` yields (book_id, description) pairs.  Returns a list of
        (book_id, sparse_row_vector) pairs (the original built this and
        then discarded it).
        """
        bookids = []
        data = []
        for (bookid, description) in corpus:
            bookids.append(bookid)
            data.append(description)
        vectors = TfidfVectorizer().fit_transform(data)
        return list(zip(bookids, vectors))


from collections import defaultdict, Counter
import gzip, json, os, csv, hashlib, math, random, sys
from math import sqrt, log, log10, pow


class ScriptToClass():
    """End-to-end pipeline over Amazon-style book ratings.

    Reads (user, book, rating) triples, densifies the user/book matrix,
    greedily clusters users and books by cosine similarity, and dumps the
    matrices needed for a Rocchio-style recommender.  Methods are meant to
    be called in declaration order; each one depends on the state the
    previous one left behind.  Python 2 code (print statements, xrange).
    """
    def __init__(self):
        self.BOOK_LIMIT = 300                     # keep only the most-reviewed books
        self.RATINGS = Counter()                  # (user_id, book_id) -> integer rating
        self.BOOKS_WITH_REVIEWS = set()           # book ids seen in the ratings file
        self.CORPUS = ""                          # later replaced by a set of book ids
        self.SCORES = defaultdict(Counter)        # user_id -> Counter(book_id -> rating)
        self.USERS = ""                           # later replaced by a sorted list of user ids
        self.BOOKS = ""                           # later replaced by a sorted list of book ids
        self.USER_CLUSTERS = defaultdict(list)    # cluster id -> member user ids
        self.USER_CENTROIDS = defaultdict(Counter)  # cluster id -> centroid rating vector
        self.IDF = Counter()                      # term -> doc frequency, inverted to N/df later

    def read_all_data(self):
        """Load rating triples from ALL_Ratings.json.gz into RATINGS.

        Each line is a JSON array; indices 0/1/2 are used as
        user id / book id / rating (rating may be a "5.0"-style string,
        hence int(float(...))).  Also collects every rated book id.
        """
        print "reading data from local copy"
        orfile = gzip.open("ALL_Ratings.json.gz", 'r')
        buf = orfile.readline()
        while buf != "":
            temp = json.loads(buf)
            self.RATINGS[(temp[0], temp[1])] = int(float(temp[2]))
            self.BOOKS_WITH_REVIEWS.add(temp[1])
            buf = orfile.readline()
        orfile.close()

    def remove_books_with_no_descriptions(self):
        """Zero out ratings of reviewed books that lack a description file.

        A book "has a description" iff a file named <book>.* exists under
        TFMATRIX/.  Entries are only zeroed here; they are actually
        dropped later by the ``+= Counter()`` trick in
        densify_user_book_matrix.  Sets self.CORPUS to the surviving books.
        """
        print "cleaning books without descriptions"
        BOOKS_WITH_DESCRIPTION = set([filename.split(".")[0] for filename in os.listdir("TFMATRIX")])
        REVIEWED_BOOKS_WITH_DESCRIPTION = BOOKS_WITH_DESCRIPTION & self.BOOKS_WITH_REVIEWS
        BOOKS_TO_REMOVE = self.BOOKS_WITH_REVIEWS - REVIEWED_BOOKS_WITH_DESCRIPTION
        self.CORPUS = self.BOOKS_WITH_REVIEWS - BOOKS_TO_REMOVE
        PROD_count = defaultdict(Counter)  # book -> Counter(user -> #ratings)
        PROD = Counter()                   # book -> number of distinct raters
        for (u, p) in self.RATINGS.keys():
            PROD_count[p][u] += 1
        for product in PROD_count:
            PROD[product] = len(PROD_count[product])
        # NOTE(review): PROD is local and recomputed from scratch in the next
        # step, so only the zeroing of self.RATINGS below has lasting effect.
        for product in BOOKS_TO_REMOVE:
            PROD[product] = 0
            for user in PROD_count[product]:
                self.RATINGS[(user, product)] = 0

    def densify_user_book_matrix(self):
        """Densify the matrix, then back everything up to disk.

        Keeps only the BOOK_LIMIT most-reviewed books and users with at
        least 5 reviews.  Fills self.SCORES, self.USERS, self.BOOKS and
        writes Ratings.json.gz, TEST_USERS.json, bookids.json, userids.json.
        """
        print "keeping only books with many reviews"
        PROD_count = defaultdict(Counter)
        for (u, p) in self.RATINGS.keys():
            PROD_count[p][u] += 1
        PROD = Counter()
        for product in PROD_count:
            PROD[product] = len(PROD_count[product])
        COMMON = set([c for (c, _) in PROD.most_common(self.BOOK_LIMIT)])
        # zero every rating of books outside the top BOOK_LIMIT
        for product in (self.CORPUS - COMMON):
            PROD[product] = 0
            for user in PROD_count[product]:
                self.RATINGS[(user, product)] = 0
        print "removing users with less than 5 reviews"
        USER_count = defaultdict(Counter)
        for (u, p) in self.RATINGS.keys():
            USER_count[u][p] += 1
        USERS = Counter()
        for user in USER_count:
            USERS[user] = len(USER_count[user])
        LEAST = set([u for u in USERS if USERS[u] < 5])
        for user in LEAST:
            for product in USER_count[user].keys():
                self.RATINGS[(user, product)] = 0
        USER_count = PROD_count = 0  # release the big temporaries
        # adding an empty Counter drops every entry zeroed above
        self.RATINGS += Counter()
        USERS = Counter()
        PROD = Counter()

        print "creating backup"
        orfile = gzip.open("Ratings.json.gz", 'w')
        for (user, product) in self.RATINGS.keys():
            self.SCORES[user][product] = self.RATINGS[(user, product)]
            orfile.write(json.dumps((user, product, self.RATINGS[(user, product)]))+"\n")
            USERS[user] += 1
            PROD[product] += 1
        orfile.close()
        self.USERS = sorted(USERS.keys())
        self.BOOKS = sorted(PROD.keys())
        NUM_OF_BOOKS = len(self.BOOKS)
        NUM_OF_USERS = len(self.USERS)
        print "Users: ", NUM_OF_USERS, " and Books: ", NUM_OF_BOOKS
        # sample 19 users for held-out evaluation (random.choice may repeat)
        TEST_USERS = []
        for i in xrange(1, 20):
            TEST_USERS.append(random.choice(self.USERS))
        target = open("TEST_USERS.json", 'w')
        target.write(json.dumps(TEST_USERS)+"\n")
        for user in TEST_USERS:
            target.write(json.dumps(self.SCORES[user])+"\n")
        target.close()
        RATINGS = 0  # NOTE(review): local name — probably meant self.RATINGS
        print "all backed up, moving to next step"
        bookids = open("bookids.json", 'w')
        bookids.write(json.dumps(self.BOOKS))
        bookids.close()
        userids = open("userids.json", 'w')
        userids.write(json.dumps(self.USERS))
        userids.close()

    def cluster_users(self):
        """Greedy single-pass clustering of users by rating-vector cosine.

        A user joins the most similar existing centroid when the cosine
        similarity exceeds 0.75; otherwise a new singleton cluster keyed by
        md5(user id) is created.  Writes USER_CLUSTERS.json.gz and
        USER_CENTROIDS.json.gz, then clears self.SCORES.
        """
        print "calculating user clusters"
        for user in self.USERS:  # iterate through users
            temp = Counter()
            for centroid in self.USER_CENTROIDS:  # score user against every cluster
                temp[centroid] = get_cosine(self.USER_CENTROIDS[centroid], self.SCORES[user])
            if len(temp) != 0:
                (c, m) = temp.most_common(1)[0]  # cluster with maximum similarity
            else: m = 0
            if m > 0.75 :  # if similarity is more than 0.75 add user to that cluster
                self.USER_CLUSTERS[c].append(user)
                # centroid becomes the element-wise mean of old centroid and user
                self.USER_CENTROIDS[c] = merge(self.USER_CENTROIDS[c], self.SCORES[user])
            else:  # else create a new singleton cluster
                self.USER_CLUSTERS[hashlib.md5(user).hexdigest()].append(user)
                self.USER_CENTROIDS[hashlib.md5(user).hexdigest()] = self.SCORES[user]
        self.SCORES = defaultdict(Counter)  # free memory; the centroids hold what we need
        CLUSTERS = self.USER_CLUSTERS.keys()
        clustfile = gzip.open("USER_CLUSTERS.json.gz", 'w')
        centrfile = gzip.open("USER_CENTROIDS.json.gz", 'w')
        centrfile.write(json.dumps(CLUSTERS)+"\n")
        # line i of each file corresponds to CLUSTERS[i]
        for cluster in CLUSTERS:
            clustfile.write(json.dumps(self.USER_CLUSTERS[cluster])+"\n")
            centrfile.write(json.dumps(self.USER_CENTROIDS[cluster])+"\n")
        clustfile.close()
        centrfile.close()

    def recalculate_tfs(self):
        """Rebuild per-book term frequencies and document frequencies.

        Reads TFMATRIX/<book>.json.gz, rescales counts, drops
        non-alphabetic tokens, writes BOOK_TF/<book>.json.gz, and
        accumulates document frequency in self.IDF, which is inverted to
        N/df at the end and dumped to IDFmatrix.json.gz / terms.json.
        """
        print "recalculating tfs"
        iterator = 0
        for book in self.BOOKS:
            iterator += 1
            print "iteration ", iterator
            filename = "TFMATRIX/"+str(book)+".json.gz"
            FREQ = Counter(json.loads(gzip.open(filename, 'r').readline()))
            if len(FREQ) == 0:
                # empty matrix: delete the useless source file
                os.remove(filename)
            else:
                # rescale so the rarest term has frequency 1
                # NOTE(review): integer division if the stored counts are
                # ints (Python 2) — presumably the JSON holds floats; confirm
                MIN = min(FREQ.values())
                for word in FREQ:
                    FREQ[word] = FREQ[word] / MIN
            # zero tokens that are not purely alphabetic (hyphens allowed)
            for word in FREQ:
                if not all([w.isalpha() for w in word.split("-")]):
                    FREQ[word] = 0
            FREQ += Counter()  # drop the zeroed entries
            TF = Counter()
            S = sum(FREQ.values())
            if len(FREQ) == 0:
                M = 0
            else:
                M = max(FREQ.values())
            for word in FREQ:
                # augmented TF: relative frequency over the max term's share
                # NOTE(review): with integer values both divisions truncate
                # under Python 2 and M/S can be 0 (ZeroDivisionError) —
                # presumably the values are floats by this point; confirm
                TF[word] = ( FREQ[word] / S ) / ( M / S )
                self.IDF[word] += 1  # document frequency of this term
            filename = "BOOK_TF/"+str(book)+".json.gz"
            gzip.open(filename, 'w').write(json.dumps(TF))
        print "calculating idfs"
        self.TERMS = sorted(self.IDF.keys())
        N = float(len(self.BOOKS))
        # store plain N/df; log10 is applied later during weighting
        for word in self.TERMS:
            self.IDF[word] = N / float(self.IDF[word])
        gzip.open("IDFmatrix.json.gz", 'w').write(json.dumps(self.IDF))
        #IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))
        terms = open("terms.json", 'w')
        terms.write(json.dumps(self.TERMS))
        terms.close()

    def calculate_weights_and_cluster_books(self):
        """Build TF-IDF book vectors and greedily cluster them.

        A book joins the most similar product cluster when the cosine
        similarity exceeds 0.9, otherwise seeds a new md5-keyed cluster.
        Writes one BOOK_VECTORS/<book>.json.gz per book plus
        PROD_CLUSTERS.json.gz / PROD_CENTROIDS.json.gz, and fills
        self.PROD_CLUSTERS and self.PROD_CENTROIDS.
        """
        print "calculating weights"
        print "and clustering books"
        #outfile = gzip.open("Weights.json.gz",'w')
        self.PROD_CLUSTERS = defaultdict(list)
        self.PROD_CENTROIDS = defaultdict(Counter)
        iterator = 0
        for book in self.BOOKS:
            Book_Vector = Counter()
            vector = []
            freqtor = []
            iterator += 1
            print "iteration ", iterator
            filename = "BOOK_TF/"+str(book)+".json.gz"
            TF = Counter(json.loads(gzip.open(filename, 'r').readline()))
            # normalising constant for the weighted vector
            # NOTE(review): uses log10(IDF^2) = 2*log10(IDF) while the
            # numerator below uses log10(IDF) — a uniform per-vector scaling,
            # so cosine similarity is unaffected, but the norm is not 1; confirm intent
            const = sqrt(sum([pow(TF[w], 2)*log10(pow(self.IDF[w], 2)) for w in TF]))
            for word in TF:
                Book_Vector[word] = (TF[word] * log10(self.IDF[word])) / const
            temp = Counter()
            for centroid in self.PROD_CENTROIDS:  # score book against every cluster
                temp[centroid] = get_cosine(self.PROD_CENTROIDS[centroid], Book_Vector)
            if len(temp) != 0:
                (c, m) = temp.most_common(1)[0]  # cluster with maximum similarity
            else:
                m = 0
            if m > 0.9:  # if similarity is more than 0.9 add the book to that cluster
                self.PROD_CLUSTERS[c].append(book)
                self.PROD_CENTROIDS[c] = merge(self.PROD_CENTROIDS[c], Book_Vector)
            else:  # else create a new singleton cluster
                self.PROD_CLUSTERS[hashlib.md5(book).hexdigest()].append(book)
                self.PROD_CENTROIDS[hashlib.md5(book).hexdigest()] = Book_Vector
            filename = "BOOK_VECTORS/"+str(book)+".json.gz"
            gzip.open(filename, 'w').write(json.dumps(Book_Vector))
        CLUSTERS = self.PROD_CLUSTERS.keys()
        clustfile = gzip.open("PROD_CLUSTERS.json.gz", 'w')
        centrfile = gzip.open("PROD_CENTROIDS.json.gz", 'w')
        centrfile.write(json.dumps(CLUSTERS)+"\n")
        # line i of each file corresponds to CLUSTERS[i]
        for cluster in CLUSTERS:
            clustfile.write(json.dumps(self.PROD_CLUSTERS[cluster])+"\n")
            centrfile.write(json.dumps(self.PROD_CENTROIDS[cluster])+"\n")
        clustfile.close()
        centrfile.close()

    def original_script(self):
        """Dump the matrices a Rocchio classifier needs.

        clusterscores.csv: one row per book cluster, one column per user
        cluster; each cell is the user-centroid mass on that book cluster
        normalised by the book cluster's size.  clusterbooks.csv: one row
        per book cluster over the full term vocabulary self.TERMS.
        """
        print "creating the bookcluster-usercluster matrix needed for rocchio"
        SMTH = defaultdict(Counter)  # user_cluster -> Counter(book_cluster -> score)
        BOOKID_CLUSTER = dict()      # invert PROD_CLUSTERS: book id -> its cluster id
        for book_cluster in self.PROD_CLUSTERS:
            for book in self.PROD_CLUSTERS[book_cluster]:
                BOOKID_CLUSTER[book] = book_cluster
        for user_cluster in self.USER_CENTROIDS.keys():
            for book in self.USER_CENTROIDS[user_cluster]:
                # NOTE(review): raises KeyError if a rated book never made it
                # into any product cluster — confirm the two pipelines stay in sync
                book_cluster = BOOKID_CLUSTER[book]
                SMTH[user_cluster][book_cluster] += (float(self.USER_CENTROIDS[user_cluster][book]) / float(len(self.PROD_CLUSTERS[book_cluster])))
        alternate = open("clusterscores.csv", 'w')
        fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for book_cluster in self.PROD_CLUSTERS:
            vector = []
            for user_cluster in self.USER_CENTROIDS:
                vector.append(SMTH[user_cluster][book_cluster])
            fwriter.writerow(vector)
        alternate.close()
        print "creating the bookcluster-terms matrix needed for rocchio"
        # NOTE(review): re-opens PROD_CLUSTERS.json.gz in write mode, which
        # overwrites the dump made in calculate_weights_and_cluster_books
        clustfile = gzip.open("PROD_CLUSTERS.json.gz", 'w')
        alternate = open("clusterbooks.csv", 'w')  # each line is a cluster of books! each row is a term
        fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
        fwriter.writerow(self.TERMS)
        for cluster in self.PROD_CENTROIDS:
            clustfile.write(json.dumps(self.PROD_CLUSTERS[cluster])+"\n")
            vector = []
            for word in self.TERMS:
                vector.append(self.PROD_CENTROIDS[cluster][word])
            fwriter.writerow(vector)
        alternate.close()
        clustfile.close()
