import MySQLdb
import json
import csv
import nltk
import enchant
from collections import defaultdict, Counter
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
__author__ = 'panagiotis'


# Shared, expensive-to-construct NLP resources. Built once at import time
# instead of on every call: tokens() is invoked once per book in the TF
# loop below, and rebuilding the stemmer and the enchant dictionary each
# time is pure overhead (both are reusable across calls).
_STEMMER = nltk.stem.PorterStemmer()
_SPELLCHECK = enchant.Dict("en_US")


def tokens(txt):
    """Return the Porter-stemmed, spell-checked word tokens of *txt*.

    A token is kept only if it is purely alphabetic AND recognised by the
    en_US dictionary; punctuation, numbers and misspellings are dropped.
    Returns a (possibly empty) list of stems.
    """
    # isalpha() is checked first: it is far cheaper than a dictionary
    # lookup and filters out the punctuation tokens word_tokenize emits.
    return [_STEMMER.stem(word)
            for word in nltk.word_tokenize(txt)
            if word.isalpha() and _SPELLCHECK.check(word)]


# --- Load book descriptions and per-user star ratings from MySQL -----------
# NOTE(review): credentials are hard-coded in source; move to config/env.
conn = MySQLdb.connect(host="mysql.stalidis.com", user="ict4growth", passwd="stanford", db="ict4growth")
curs = conn.cursor()

# ProductID -> description text for a slice of moderately-reviewed books.
descriptions = defaultdict(str)
curs.execute("""SELECT `ProductID`, `Discription` FROM `Books` B WHERE ((B.`Reviews` < 1200) AND (B.`Reviews` > 800)) LIMIT 0,110""")
for row in curs:
    descriptions[row[0]] = row[1]

book_ids = sorted(descriptions)     # stable ordering for matrix columns
ratings = defaultdict(Counter)      # user_id -> Counter{book_id: stars}

# One query per selected book: collect every (user, stars) review for it.
for book_id in book_ids:
    curs.execute("""SELECT `UserID`, `Stars` FROM `NReviews` WHERE `ProductID` = %s""", (book_id,))
    for user_id, stars in curs:
        # Stars may arrive as a string/decimal such as "4.0"; normalise to int.
        ratings[user_id][book_id] = int(float(stars))

user_ids = ratings.keys()

percent = Counter()
for user_id in user_ids:
    if len(ratings[user_id]) > 10:
        neg = len([book_id for book_id in ratings[user_id] if ratings[user_id][book_id] < 4])
        pos = len([book_id for book_id in ratings[user_id] if ratings[user_id][book_id] > 3])
        percent[user_id] = float(pos) / (pos + neg)
    else:
        percent[user_id] = 0

percent += Counter()
decent = [(u, p) for (u, p) in percent.most_common() if (p < 0.7) and (p > 0.3)]
# num_of_users = 30
# user_ids = [u for (u, p) in decent[(len(decent)-num_of_users)/2:((len(decent)-num_of_users)/2)+num_of_users]]
user_ids = [u for (u, p) in decent]

print len(user_ids)  # see how many users i got

# -------------------------------------------------

print "creating the user-book matrix needed for rocchio"

alternate = open("./datasets/u116_b100/scorevectors.csv", 'w')   # changed so that each line is a book!
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for user in user_ids:
    vector = []
    for book in book_ids:
        vector.append(ratings[user][book])
    fwriter.writerow(vector)
alternate.close()

alternate = open("./datasets/u116_b100/Tscorevectors.csv", 'w')   # changed so that each line is a book!
fwriter = csv.writer(alternate, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for book in book_ids:
    vector = []
    for user in user_ids:
        vector.append(ratings[user][book])
    fwriter.writerow(vector)
alternate.close()

# -------------------------------------------------

# clf = TfidfVectorizer(tokenizer=tokens)
# book_vectors = clf.fit_transform(temp)

# np.savetxt("bookvectors.csv", book_vectors.toarray(), delimiter="\t")

print "recalculating tfs"
iterator = 0
IDF = Counter()
TFS = defaultdict(dict)
for book in book_ids:
    iterator += 1
    print "iteration ", iterator
    tok = tokens(descriptions[book])
    FREQ = Counter(tok)
    MIN = min(FREQ.values())
    for word in FREQ:
        FREQ[word] /= float(MIN)

    FREQ += Counter()
    TF = Counter()
    S = float(sum(FREQ.values()))
    if len(FREQ) == 0:
        M = 0
    else:
        M = max(FREQ.values())
    for word in FREQ:
        if (M != 0) and (S != 0):
            TF[word] = (FREQ[word] / S) / (M / S)
            IDF[word] += 1
    TFS[book] = TF
    # filename = "BOOK_TF/"+str(book)+".json.gz"
    # gzip.open(filename, 'w').write(json.dumps(TF))

print "calculating idfs"
TERMS = sorted(IDF.keys())
N = float(len(book_ids))
for word in TERMS:
    IDF[word] = N / float(IDF[word])

# gzip.open("IDFmatrix.json.gz",'w').write(json.dumps(IDF))
# IDF = Counter(json.loads(gzip.open("IDFmatrix.json.gz",'r').readline()))
#
# terms = open("terms.json",'w')
# terms.write(json.dumps(TERMS))
# terms.close()

# ------------------------------------------------------------------------------------------------
import math
print "calculating weights"

weightfile = open("./datasets/u116_b100/weightvectors.csv", 'w')
weightwriter = csv.writer(weightfile, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
freqfile = open("./datasets/u116_b100/frequencies.csv", 'w')
freqwriter = csv.writer(freqfile, delimiter="\t", quotechar='"', quoting=csv.QUOTE_MINIMAL)
iterator = 0
for book in book_ids:
    WEIGHT = Counter()
    vector = []
    freqtor = []
    iterator += 1
    print "iteration ", iterator
    # filename = "BOOK_TF/"+str(book)+".json.gz"
    # TF = Counter(json.loads(gzip.open(filename,'r').readline()))
    TF = TFS[book]
    const = math.sqrt(sum([pow(TF[w], 2)*math.log10(pow(IDF[w], 2)) for w in TF]))
    for word in TF:
        WEIGHT[word] = (TF[word] * math.log10(IDF[word])) / const
    # SCORES[book] = WEIGHT
    for word in TERMS:
        vector.append(WEIGHT[word])
        freqtor.append(TF[word])
    weightwriter.writerow(vector)
    freqwriter.writerow(freqtor)

weightfile.close()
freqfile.close()
