import numpy, datetime
from collections import defaultdict
from sklearn.cross_validation import train_test_split
from sklearn.neighbors import NearestCentroid
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.metrics import classification_report, mean_absolute_error, mean_squared_error
from ArtMAP.ArtMap import ArtMAP
__author__ = 'panagiotis'


from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
# SELECT CLASSIFIERS TO TEST
# The active entries below are run one after another inside the main loop;
# uncomment any of the alternatives to include them in the experiment.
classifiers = [
    NearestCentroid(),
    SVC(gamma=3, C=1),
    # --- alternatives kept for experimentation ---
    # KNeighborsClassifier(3),
    # SVC(kernel="linear", C=0.025),
    # SVC(gamma=2, C=1),
    # DecisionTreeClassifier(max_depth=5),
    # RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    # AdaBoostClassifier(),
    # GaussianNB(),
    # LDA(),
    # QDA(),
    # SVR(kernel='rbf', C=1e3, gamma=0.1),
]

# Wall-clock start of the run; total runtime is printed at the very end.
start_time = datetime.datetime.now()

# Rating labels: index 0 is the "no rating" sentinel, 1-5 are real scores.
# (A coarser ternary mapping such as [0, -1, -1, -1, 1, 1] is an alternative.)
choices = [0, 1, 2, 3, 4, 5]
classes = sorted(set(choices))

# Location of the dataset files on disk.
path = "./datasets/"
dataset = "jester/"

# LOAD DATASET
def _load_matrix(filename, cast):
    """Read a tab-separated matrix file from the dataset folder.

    Each line is one row; every field is converted with `cast`.
    Uses `with` so the file handle is closed deterministically (the
    previous inline comprehensions leaked the handle until GC).
    """
    with open(path + dataset + filename, 'rb') as handle:
        return [[cast(field) for field in line.strip().split("\t")]
                for line in handle]

# Integer user score vectors (scores are parsed as floats first, then truncated).
v_users = _load_matrix("scorevectors.csv", lambda w: int(float(w)))
# Map every raw score s to choices[s] element-wise (identity for 0..5 here,
# but also validates that all scores are inside the choices range).
t_users = numpy.choose(v_users, choices)

# Float book weight vectors.
v_books = _load_matrix("weightvectors.csv", float)

# CREATE RATINGS MATRIX (or load from file)
ratings = defaultdict(lambda: defaultdict(int))
for user in xrange(0, len(v_users)):
    print user
    for book in xrange(0, len(v_users[0])):
        if t_users[user][book] != choices[0]:
            ratings[user][book] = t_users[user][book]

# PREPARE DATA FOR TRAIN-TEST SPLIT
starting_vectors = defaultdict(list)
for user in ratings.keys():
    print user
    for book in ratings[user].keys():
            starting_vectors['user'].append(v_users[user])
            starting_vectors['book'].append(v_books[book])
            starting_vectors['results'].append(ratings[user][book])

cross_validated = defaultdict(list)
from ArtMAP.ArtMap import AdaptiveClustering
clX= AdaptiveClustering(threshold=0.4)
clX.fit(numpy.array(v_users))
clY = AdaptiveClustering(threshold=1.0)
clY.fit(numpy.array(v_books))

# "STATE" DEFINES HOW MANY DIFFERENT CHOPS TO TRAIN-TEST DATA WILL BE
for state in xrange(0, 1):
    iteration_data = defaultdict(list)
    iteration_data['train_user_vectors'], iteration_data['test_user_vectors'], \
        iteration_data['train_book_vectors'], iteration_data['test_book_vectors'],\
        iteration_data['train_targets'], iteration_data['test_correct'] = \
        train_test_split(starting_vectors['user'], starting_vectors['book'],
                         starting_vectors['results'], random_state=state, test_size=0.3)

    for clf in classifiers:
        iteration_results = defaultdict(list)

        # CALL ArtMAP CLASSIFIER
        art = ArtMAP(classifier=clf)

        # PREFIT CLUSTERING ALGORITHMS WITH ALL USER AND BOOK DATA
        # this is where you select threshold for clustering
        print "number of users", len(v_users)
        art.X_cluster = clX
        print "number of user clusters", art.X_cluster.number_of_clusters()
        print "number of books", len(v_books)
        art.Y_cluster = clY
        art.fitted_X = True
        art.fitted_Y = True
        # FIT ArtMAP WITH TRAINING DATA
        art.fit(X=numpy.array(iteration_data['train_user_vectors']),
                Y=numpy.array(iteration_data['train_book_vectors']),
                z=iteration_data['train_targets'])
        print "fitted"

        # PREDICT RESULTS FOR THIS TEST DATA
        iteration_results['predicted'] = art.predict(X=numpy.array(iteration_data['test_user_vectors']),
                                                     Y=numpy.array(iteration_data['test_book_vectors']))

        iteration_results['correct'] = iteration_data['test_correct']

        # ADD ITERATION RESULTS TO TOTAL RESULTS
        cross_validated['correct'].append(iteration_results['correct'])
        cross_validated['predicted'].append(iteration_results['predicted'])
        # cross_validated['scores'].append(iteration_results['score'])
        print "-------------------------------------------------------"
        print "iteration", state, "results:"
        print classification_report(y_true=iteration_results['correct'], y_pred=numpy.array(iteration_results['predicted']))

# PRINT METRICS FOR ALL ITERATIONS TOGETHER
print "-------------------------------------------------------"
print "all together"
all_correct = []
for correct in cross_validated['correct']:
    all_correct += correct

all_predicted = []
for prediction in cross_validated['predicted']:
    all_predicted += prediction


print "mean absolute error", mean_absolute_error(all_correct, all_predicted)
print "mean square error", mean_squared_error(all_correct, all_predicted)
try:
    print "report:"
    print classification_report(y_true=all_correct, y_pred=numpy.array(all_predicted))
except:
    from sklearn.metrics import r2_score
    print "r2 score", r2_score(y_true=all_correct, y_pred=numpy.array(all_predicted))
finally:
    print str(datetime.datetime.now() - start_time)
