import numpy, datetime
from collections import defaultdict
# NOTE(review): sklearn.cross_validation was deprecated in scikit-learn 0.18
# and removed in 0.20; on a modern install this import must come from
# sklearn.model_selection instead.
from sklearn.cross_validation import train_test_split
# SVC is imported but not present in `classifiers` below -- presumably kept
# for experimentation; confirm before removing.
from sklearn.svm import SVC
from sklearn.neighbors import NearestCentroid
from sklearn.dummy import DummyClassifier
from scipy import sparse
__author__ = 'panagiotis'


# Root directory holding one sub-directory per dataset.
path = "/home/pstalidis/Projects/ICT4Growth/dataset/"
# Dataset directory names (the trailing comment lists alternatives tried
# earlier; note those names carry a trailing "/").
datasets = ["u30_b1647"]   # , ,"u304_b110/""u116_b110/"
# NOTE(review): `filename` appears unused -- the experiment loops below
# hard-code "/scorevectors.csv" directly; confirm before removing.
filename = "/scorevectors.csv"
# Estimators evaluated by both experiments below.
classifiers = [NearestCentroid(metric="cosine")]

def KFold(n, n_folds, test_size=0.25, train_size=None):
    """Yield `n_folds` (train_indexes, test_indexes) pairs over range(n).

    NOTE: despite the name this is NOT a true K-fold partition: every
    iteration is an independent shuffled split (seeded with the iteration
    number `i` for reproducibility), so test sets may overlap between
    iterations -- semantically this is sklearn's ShuffleSplit.

    :param n: number of samples to index.
    :param n_folds: how many independent splits to yield.
    :param test_size: fraction (or count) of samples in each test set.
    :param train_size: optional fraction (or count) for the train set.
    """
    # The index pool is loop-invariant -- build it once instead of on every
    # iteration.  train_test_split shuffles a copy, so reuse is safe.
    indices = list(range(n))
    for i in xrange(0, n_folds):
        yield train_test_split(indices, random_state=i, test_size=test_size, train_size=train_size)

# Nested results store, filled in by the experiment loops below:
#   results[dataset][classifier_repr] -> {"predicted": [...],
#                                         "correct":   [...],
#                                         "time":      timedelta}
# (defaultdict so new dataset/classifier slots materialise on first access.)
results = defaultdict(lambda: defaultdict(dict))

# Removed: a leftover, unused debug statement that built a throwaway
# sparse.coo_matrix (`a`) which nothing in the file ever referenced.

# Experiment 1: per-user rating classification on the raw score matrix.
# For each dataset/classifier pair: load the score matrix, repeatedly
# shuffle-split the known ratings, fit one model per user on that user's
# training books, and accumulate predictions for the held-out books.
for dataset in datasets:
    for classifier in classifiers:
        start_time = datetime.datetime.now()
        # Tab-separated matrices.  Scores go through float->int so values
        # like "3.0" parse; weights stay float.  Presumably rows = users in
        # scorevectors and rows = books in weightvectors -- confirm.
        v_users = sparse.csr_matrix([[int(float(w)) for w in row] for row in [line.strip().split("\t") for line in open(path+dataset+"/scorevectors.csv", 'rb')]])
        # NOTE(review): v_books is loaded here but never used in this
        # experiment -- confirm before removing the load.
        v_books = sparse.csr_matrix([[float(w) for w in row] for row in [line.strip().split("\t") for line in open(path+dataset+"/weightvectors.csv", 'rb')]])
        # v_books = v_users.T
        # v_users = numpy.choose(v_users, [0, -1, -1, -1, 1, 1])

        # Flatten every non-zero matrix cell into a (user, book, score)
        # rating record; zeros are treated as "no rating".
        ratings = []
        for user in xrange(0, v_users.shape[0]):
            for book in xrange(0, v_users.shape[1]):
                if v_users[user, book] != 0:
                    ratings.append({"user": user, "book": book, "score": v_users[user, book]})
        ratings = numpy.array(ratings)

        results[dataset][classifier.__repr__()]["predicted"] = []
        results[dataset][classifier.__repr__()]["correct"] = []
        # 30 shuffled 90/10 splits of the rating records (see KFold above --
        # these are overlapping shuffle-splits, not disjoint folds).
        for train_indexes, test_indexes in KFold(n=ratings.shape[0], n_folds=30, test_size=0.1, train_size=0.9):
            # Regroup the split by user: each user gets their own
            # train/test lists of (book, score) pairs.
            per_user = defaultdict(lambda: {"train": [], "test": []})
            for d in ratings[train_indexes]:
                per_user[d["user"]]["train"].append((d["book"], d["score"]))
            for d in ratings[test_indexes]:
                per_user[d["user"]]["test"].append((d["book"], d["score"]))
            for user_id in per_user.keys():
                X_train = []
                y_train = []
                X_test = []
                y_test = []
                # A sample for book b is the whole score column v_users[:, b]
                # (all users' scores for that book); the label is this
                # user's score for it.
                for (book_id, score) in per_user[user_id]["train"]:
                    X_train.append(v_users[:, book_id])
                    y_train.append(score)
                for (book_id, score) in per_user[user_id]["test"]:
                    X_test.append(v_users[:, book_id])
                    y_test.append(score)
                if (len(X_train) > 0) and (len(X_test) > 0):
                    try:
                        # NOTE(review): this binds (and refits) the shared
                        # estimator instance, not a clone -- state carries
                        # across users/folds until the next fit call.
                        clf = classifier
                        clf.fit(X_train, y_train)
                    except ValueError:
                        # Fit failed (e.g. a single class for this user):
                        # fall back to always predicting the first label.
                        clf = DummyClassifier(strategy="constant", constant=y_train[0])
                        clf.fit(X_train, y_train)
                    finally:
                        # Runs on both paths above; records predictions from
                        # whichever clf was fitted last.
                        results[dataset][classifier.__repr__()]["predicted"] += clf.predict(numpy.vstack(X_test)).tolist()
                        results[dataset][classifier.__repr__()]["correct"] += y_test
                else:
                    # No training data for this user in this split: predict
                    # 0 for every held-out rating so lists stay aligned.
                    results[dataset][classifier.__repr__()]["predicted"] += [0 for i in xrange(0, len(y_test))]
                    results[dataset][classifier.__repr__()]["correct"] += y_test
        results[dataset][classifier.__repr__()]["time"] = datetime.datetime.now() - start_time

from ICT4Growth.ArtMAP2 import ReadaptiveClustering as AdaptiveClustering
from ICT4Growth.ArtMAP2 import ArtMAP

for dataset in datasets:
    start_time = datetime.datetime.now()
    v_users = numpy.array([[int(float(w)) for w in row] for row in [line.strip().split("\t") for line in open(path+dataset+"scorevectors.csv", 'rb')]])
    # v_books = numpy.array([[float(w) for w in row] for row in [line.strip().split("\t") for line in open(path+dataset+"weightvectors.csv", 'rb')]])
    v_books = v_users.T
    # v_users = numpy.choose(v_users, [0, -1, -1, -1, 1, 1])

    ratings = []
    for user in xrange(0, v_users.shape[0]):
        for book in xrange(0, v_users.shape[1]):
            if v_users[user, book] != 0:
                ratings.append({"user": user, "book": book, "score": v_users[user, book]})
    ratings = numpy.array(ratings)

    clY = AdaptiveClustering(threshold=0.95)
    clY.fit(v_books)
    clX = AdaptiveClustering(threshold=0.89)
    clX.fit(v_users)

    for classifier in classifiers:
        results[dataset]["ArtMAP "+classifier.__repr__()]["predicted"] = []
        results[dataset]["ArtMAP "+classifier.__repr__()]["correct"] = []
        for train_indexes, test_indexes in KFold(n=ratings.shape[0], n_folds=30, test_size=0.1, train_size=0.9):
            clf = classifier
            art = ArtMAP(clf, [0])
            art.X_cluster = clX
            art.Y_cluster = clY
            art.fitted_X = True
            art.fitted_Y = True

            X_train = v_users[[s["user"] for s in ratings[train_indexes]]]
            Y_train = v_books[[s["book"] for s in ratings[train_indexes]]]
            z_train = [s["score"] for s in ratings[train_indexes]]
            print "calling fit method"
            art.fit(X_train, Y_train, z_train)
            # del X_train, Y_train, z_train, train_indexes

            print "preparing to predict"
            X_test = v_users[[s["user"] for s in ratings[test_indexes]]]
            Y_test = v_books[[s["book"] for s in ratings[test_indexes]]]
            z_test = [s["score"] for s in ratings[test_indexes]]
            results[dataset]["ArtMAP "+classifier.__repr__()]["predicted"] += art.predict(X_test, Y_test)
            results[dataset]["ArtMAP "+classifier.__repr__()]["correct"] += z_test
            # del X_test, Y_test, z_test, test_indexes

        results[dataset]["ArtMAP "+classifier.__repr__()]["time"] = datetime.datetime.now() - start_time

from sklearn.metrics import mean_absolute_error
from sklearn.metrics import classification_report

for d in results.keys():
    for c in results[d].keys():
        print 20*"-"
        print "with data from", d
        print "using", c, "classifier"
        print "MAE:", mean_absolute_error(results[d][c]["correct"], results[d][c]["predicted"])
        print "time to compute", results[d][c]["time"]
        print 20*"-"
#
# for d in results.keys():
#     for c in results[d].keys():
#         print classification_report(results[d][c]["correct"], results[d][c]["predicted"])
#
