from collections import defaultdict, Counter
import MySQLdb
import numpy
from datetime import datetime
from sklearn.cross_validation import KFold
from sklearn.neighbors import NearestCentroid
from sklearn.dummy import DummyClassifier
from sklearn.metrics import classification_report
__author__ = 'pstalidis'


# --- Data loading -----------------------------------------------------------
# NOTE(review): DB credentials are hard-coded in source — move to config/env.
conn = MySQLdb.connect(host="mysql.stalidis.com", user="ict4growth", passwd="stanford", db="ict4growth")
curs = conn.cursor()

# Fetch books ranked 501-600 by review count (LIMIT 500, 100 = skip 500, take 100).
curs.execute("""SELECT `ProductID`, `Discription` FROM `Books` ORDER BY `Books`.`Reviews` DESC LIMIT 500 , 100""")

book_ids = []  # ProductID per book, position-aligned with t_books
t_books = []   # raw description text per book, same order as book_ids
ratings = defaultdict(Counter)  # user_id -> Counter{book position -> star rating}

from sklearn.feature_extraction.text import TfidfVectorizer

for (book_id, description) in curs:
    book_ids.append(book_id)
    t_books.append(description)

# TF-IDF over the fetched descriptions; drop terms appearing in >85% of the
# documents or in fewer than 2 documents.
vectorizer = TfidfVectorizer(max_df=0.85, min_df=2)
v_books = vectorizer.fit_transform(t_books)  # NOTE(review): never used below — confirm it can be removed

# For every selected book, pull all of its user reviews and record the star
# rating under the book's *positional* index (not its ProductID), so ratings
# align with the DictVectorizer columns built below.
for (pos, book_id) in enumerate(book_ids):
    curs.execute("""SELECT `UserID`, `Stars` FROM `NReviews` WHERE `ProductID` = %s""", (book_id,))
    for (user_id, stars) in curs:
        # `Stars` is parsed via float first, so it presumably arrives as a
        # decimal string such as "4.0" — TODO confirm against the schema.
        ratings[user_id][pos] = int(float(stars))

del book_ids, t_books

users = ratings.keys()  # NOTE(review): never read before being deleted below — likely dead code

from sklearn.feature_extraction import DictVectorizer

# Build the user-by-book rating matrix: one sparse row per user, one column
# per book position, entries are the star ratings (absent entry = no rating).
vectorizer = DictVectorizer()
v_users = vectorizer.fit_transform([dict(counts) for counts in ratings.values()])

# Star value -> sentiment label lookup: 0 stays 0 (unrated),
# 1-3 stars -> -1 (negative), 4-5 stars -> +1 (positive).
choices = [0, -1, -1, -1, 1, 1]

del ratings
del users

t0 = datetime.now()

# Flatten the sparse user x book matrix into (user, book, score) triples,
# skipping "no rating" entries (choices[0] == 0).
# Iterate only the stored entries via COO instead of probing every single
# (user, book) cell with sparse __getitem__, which is O(rows * cols) with a
# slow per-cell lookup. A row-major CSR converts to COO in the same
# user-major, book-ascending order the original double loop produced.
ratings = []
coo = v_users.tocoo()
for user, book, score in zip(coo.row, coo.col, coo.data):
    if score != choices[0]:  # guard against explicitly-stored zeros
        ratings.append({"user": user, "book": book, "score": score})
ratings = numpy.array(ratings)

# 10-fold CV over all rating triples. The original passed
# n=ratings.shape[0]-1, which silently excluded the last triple from every
# fold (off-by-one).
ten_fold = KFold(n=ratings.shape[0], n_folds=10, shuffle=True)

# 10-fold cross-validation: each fold splits the rating triples, then a
# separate classifier is trained *per user* over that user's rated books.
results = {"predicted": [], "correct": []}
for train_indexes, test_indexes in ten_fold:
    # Group this fold's train/test triples by user.
    per_user = defaultdict(lambda: {"train": [], "test": []})
    for d in ratings[train_indexes]:
        per_user[d["user"]]["train"].append((d["book"], d["score"]))
    for d in ratings[test_indexes]:
        per_user[d["user"]]["test"].append((d["book"], d["score"]))
    for user_id in per_user.keys():
        X_train = []
        y_train = []
        X_test = []
        y_test = []
        # The feature vector for a (user, book) sample is the book's whole
        # column of the user-rating matrix, i.e. every user's score for that
        # book. NOTE(review): v_users[:, book_id] is an (n_users, 1) sparse
        # column; presumably the old sklearn in use accepted this layout —
        # verify (vs. a transposed (1, n_users) row) before porting.
        for (book_id, score) in per_user[user_id]["train"]:
            X_train.append(v_users[:, book_id])
            y_train.append(score)
        for (book_id, score) in per_user[user_id]["test"]:
            X_test.append(v_users[:, book_id])
            y_test.append(score)
        if (len(X_train) > 0) and (len(X_test) > 0):
            try:
                clf = NearestCentroid()
                clf.fit(X_train, y_train)
            except ValueError:
                # NearestCentroid raises ValueError when y_train contains a
                # single class; fall back to always predicting that class.
                clf = DummyClassifier(strategy="constant", constant=y_train[0])
                clf.fit(X_train, y_train)
            finally:
                # Runs after whichever fit path succeeded: predict this
                # user's held-out books for the fold.
                results["predicted"] += clf.predict(numpy.vstack(X_test)).tolist()
                results["correct"] += y_test
        else:
            # User has no train (or no test) ratings in this fold: emit the
            # "unrated" marker 0 for each of their test items.
            results["predicted"] += [0 for i in xrange(0, len(y_test))]
            results["correct"] += y_test

print "time taken", datetime.now() - t0
results['spredicted'] = [int(x) for x in results['predicted']]
results['scorrect'] = [int(x) for x in results['correct']]

results['predicted'] = numpy.choose(results['spredicted'], choices)
results['correct'] = numpy.choose(results['scorrect'], choices)

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print "precision", precision_score(results['correct'], results['predicted'])
print "recall", recall_score(results['correct'], results['predicted'])
print "accuracy", accuracy_score(results['correct'], results['predicted'])
print "f-measure", f1_score(results['correct'], results['predicted'])

