from collections import defaultdict, Counter
import MySQLdb
import numpy
from datetime import datetime
from sklearn.cross_validation import KFold
from sklearn.neighbors import NearestCentroid
from sklearn.svm import SVC
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
__author__ = 'pstalidis'
from ICT4Growth.ArtMAP2 import ArtMAP, ReadaptiveClustering


# --- Data loading -------------------------------------------------------
# NOTE(review): DB credentials are hard-coded in source; move to config/env.
conn = MySQLdb.connect(host="mysql.stalidis.com", user="ict4growth", passwd="stanford", db="ict4growth")
curs = conn.cursor()

# Fetch the 100 books ranked 501-600 by review count.
curs.execute("""SELECT `ProductID`, `Discription` FROM `Books` ORDER BY `Books`.`Reviews` DESC LIMIT 500 , 100""")

# user_id -> {book position -> +1/-1}, filled in by the ratings pass below.
ratings = defaultdict(Counter)

rows = curs.fetchall()
book_ids = [row[0] for row in rows]
t_books = [row[1] for row in rows]

# TF-IDF features over the book descriptions (one sparse row per book).
vectorizer = TfidfVectorizer(max_df=0.85, min_df=2)
v_books = vectorizer.fit_transform(t_books)

# Collect the star ratings for every selected book and binarize them:
# 4+ stars -> +1 (liked), anything lower -> -1 (disliked).  Ratings are
# keyed by user id and by the book's positional index into v_books, so the
# two feature spaces stay aligned.
for (pos, book_id) in enumerate(book_ids):
    curs.execute("""SELECT `UserID`, `Stars` FROM `NReviews` WHERE `ProductID` = %s""", (book_id,))
    for (user_id, stars) in curs:
        # `Stars` apparently arrives as a string such as "4.0" -- the
        # float() hop is needed before int().  TODO confirm column type.
        if int(float(stars)) >= 4:
            ratings[user_id][pos] = 1
        else:
            ratings[user_id][pos] = -1

del book_ids, t_books

# One sparse row per user; column j holds that user's +1/-1 vote on book j.
# (The former `users = ratings.keys()` temp was never used and was removed.)
vectorizer = DictVectorizer()
v_users = vectorizer.fit_transform([dict(ratings[user]) for user in ratings.keys()])

del ratings

t0 = datetime.now()

# Flatten the sparse user x book matrix into (user, book, score) records.
# tocoo() walks only the stored entries, replacing the original dense
# O(users * books) double loop with per-element sparse indexing.
# CSR -> COO keeps row-major order, so the record order is unchanged.
coo = v_users.tocoo()
ratings = numpy.array([
    {"user": int(u), "book": int(b), "score": s}
    for (u, b, s) in zip(coo.row, coo.col, coo.data)
    if s != 0  # guard against explicitly-stored zeros, as the old loop did
])

# Cluster both dimensions once, up front: clX groups similar users,
# clY groups similar books.  These pre-fitted models are reused by every
# cross-validation fold below instead of being re-fit per fold.
# NOTE(review): the user threshold (0.9999) is much tighter than the book
# threshold (0.99) -- presumably intentional for the sparser user vectors;
# confirm against ReadaptiveClustering's semantics.
clX = ReadaptiveClustering(threshold=0.9999)
clX.fit(v_users)
clY = ReadaptiveClustering(threshold=0.99)
clY.fit(v_books)

print "fitted both dimensions"

ten_fold = KFold(n=ratings.shape[0]-1, n_folds=10, shuffle=True)
from scipy import sparse
results = {"predicted": [], "correct": [], "train_pr": [], "train_co": []}
from sklearn.svm import SVR
for train_indexes, test_indexes in ten_fold:
    clf = NearestCentroid()
    art = ArtMAP(clf, [0])
    art.X_cluster = clX
    art.Y_cluster = clY
    art.fitted_X = True
    art.fitted_Y = True

    X_train = []
    Y_train = []
    z_train = []
    X_test = []
    Y_test = []
    z_test = []
    print "splitting dataset"
    for score in ratings[train_indexes]:
        X_train.append(v_users[score['user']])
        Y_train.append(v_books[score['book']])
        z_train.append(score['score'])
    print "calling fit method"
    art.fit(sparse.vstack(X_train), sparse.vstack(Y_train), z_train, debug=True)

    for score in ratings[test_indexes]:
        X_test.append(v_users[score['user']])
        Y_test.append(v_books[score['book']])
        z_test.append(score['score'])

    results["predicted"] += art.predict(sparse.vstack(X_test), sparse.vstack(Y_test))
    results["correct"] += z_test
    results["train_pr"] += art.predict(sparse.vstack(X_train), sparse.vstack(Y_train))
    results["train_co"] += z_train

print "time taken", datetime.now() - t0
#results['ss_predicted'] = numpy.choose([int(x) for x in results['predicted']], choices)
#results['ss_correct'] = numpy.choose([int(x) for x in results['correct']], choices)

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
print "training", f1_score(results['train_co'], results['train_pr'])
results['tcl_pr'] = []
for i in results['predicted']:
    if i > 0:
        results['tcl_pr'].append(1.0)
    else:
        results['tcl_pr'].append(-1.0)
print "precision", precision_score(results['correct'], results['tcl_pr'])
print "recall", recall_score(results['correct'], results['tcl_pr'])
print "accuracy", accuracy_score(results['correct'], results['tcl_pr'])
print "f-measure", f1_score(results['correct'], results['tcl_pr'])


print "precision", precision_score(results['train_co'], results['train_pr'])
print "recall", recall_score(results['train_co'], results['train_pr'])
print "accuracy", accuracy_score(results['train_co'], results['train_pr'])
print "f-measure", f1_score(results['train_co'], results['train_pr'])

