import numpy, datetime
from collections import defaultdict
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.neighbors import NearestCentroid
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.metrics import classification_report
__author__ = 'panagiotis'


# create_ids = [i for i in xrange(0, 5000)]
start_time = datetime.datetime.now()

# choices = ["Z", "N", "N", "N", "P", "P"]
choices = [0, 1, 2, 3, 4, 5]
classes = sorted(list(set(choices)))


def _load_matrix(path, cell_type):
    """Read a tab-separated matrix from *path*, converting each cell with *cell_type*."""
    # BUG FIX: the original left both file handles open; 'with' closes them.
    with open(path, 'rb') as handle:
        return [[cell_type(w) for w in line.strip().split("\t")] for line in handle]


# User score matrix: one row per user, integer score per book.
v_users = _load_matrix("scorevectors.csv", lambda w: int(float(w)))
# Map every score through `choices`; with choices == [0..5] this is an
# identity lookup that also yields a numpy array of the same scores.
t_users = numpy.choose(v_users, choices)

# Book feature matrix: one row of float weights per book.
v_books = _load_matrix("weightvectors.csv", float)

# targets[user][book] -> observed non-zero rating for that pair (filled below).
targets = defaultdict(lambda: defaultdict(int))

from ArtMAP.models import AdaptiveResonance

# Cluster the users by their score vectors.
cl1 = AdaptiveResonance(shrink_threshold=1.0)
cl1.fit(v_users)
# Cluster the books by their weight vectors.
cl2 = AdaptiveResonance(shrink_threshold=1.0)
cl2.fit(v_books)

# PERF: the original called predict() once per (user, book) cell -- O(n*m)
# model evaluations for labels that depend only on the row/column. Predict
# each row's cluster exactly once up front.
user_clusters = [cl1.predict(vec)[0] for vec in v_users]
book_clusters = [cl2.predict(vec)[0] for vec in v_books]

# cl_ratings[user_cluster][book_cluster] -> list of observed ratings for that pair.
cl_ratings = defaultdict(lambda: defaultdict(list))
for user in xrange(0, len(v_users)):
    for book in xrange(0, len(v_users[0])):
        # choices[0] (== 0) marks "no rating"; keep only real ratings.
        if t_users[user][book] != choices[0]:
            targets[user][book] = t_users[user][book]
            cl_ratings[user_clusters[user]][book_clusters[book]].append(t_users[user][book])
# ratings[user_cluster][book_cluster] -> rounded mean rating for that cluster pair.
ratings = defaultdict(lambda: defaultdict(int))
# BUG FIX: the original iterated over the freshly-created (empty) `ratings`
# dict, so the body never executed and no averages were ever computed;
# iterate the populated `cl_ratings` instead.
for user_cluster in cl_ratings.keys():
    for book_cluster in cl_ratings[user_cluster].keys():
        observed = cl_ratings[user_cluster][book_cluster]
        ratings[user_cluster][book_cluster] = round(float(sum(observed)) / float(len(observed)))


# Flatten every (user, book) pair with a known rating into parallel lists
# (feature vectors, cluster memberships, cluster-pair prior, true target).
starting_vectors = defaultdict(list)
for user in targets.keys():
    # PERF: these values do not depend on `book`; the original recomputed
    # transform()/predict() for the same user inside the inner loop.
    user_vector = v_users[user]
    u_cluster_vector = cl1.transform(user_vector)[0]
    u_cluster = cl1.predict(user_vector)[0]
    for book in targets[user].keys():
        book_vector = v_books[book]
        starting_vectors['user_vector'].append(user_vector)
        starting_vectors['u_cluster_vector'].append(u_cluster_vector)
        starting_vectors['book_vector'].append(book_vector)
        starting_vectors['b_cluster_vector'].append(cl2.transform(book_vector)[0])
        # Mean rating of the (user cluster, book cluster) pair acts as a
        # collaborative-filtering prior for this pair.
        starting_vectors['rating'].append(ratings[u_cluster][cl2.predict(book_vector)[0]])
        starting_vectors['target'].append(targets[user][book])

# Accumulates per-iteration results across all 30 random splits.
# BUG FIX: `cross_validated` was never initialized anywhere in the file, so
# the .append() calls at the bottom of this loop raised NameError on the
# very first iteration.
cross_validated = defaultdict(list)

for state in xrange(0, 30):
    iteration_vectors = defaultdict(list)
    # One reproducible 70/30 split per iteration (random_state=state); all
    # six parallel lists are shuffled and split consistently.
    iteration_vectors['train_user_vector'], iteration_vectors['test_user_vector'],\
        iteration_vectors['train_u_cluster_vector'], iteration_vectors['test_u_cluster_vector'],\
        iteration_vectors['train_book_vector'], iteration_vectors['test_book_vector'],\
        iteration_vectors['train_b_cluster_vector'], iteration_vectors['test_b_cluster_vector'],\
        iteration_vectors['train_rating'], iteration_vectors['test_rating'],\
        iteration_vectors['train_target'], iteration_vectors['test_target'] = \
        train_test_split(starting_vectors['user_vector'], starting_vectors['u_cluster_vector'],
                         starting_vectors['book_vector'], starting_vectors['b_cluster_vector'],
                         starting_vectors['rating'], starting_vectors['target'],
                         random_state=state, test_size=0.3)

    iteration_results = defaultdict(list)
    # NOTE(review): this loop iterates an empty literal, so no classifier is
    # ever fitted, and the `this_user` dict it references is never defined.
    # Presumably it was meant to iterate per-user-cluster groupings of the
    # split above -- restore that source before trusting the metrics below.
    for user_cluster in []:

        clf = NearestCentroid()

        # clf = SVC(gamma=3, C=1)
        clf.fit(X=this_user['train_book_vectors'], y=this_user['train_targets'])
        this_user['predicted'] = clf.predict(X=this_user['test_book_vectors']).tolist()
        iteration_results['correct'] += this_user['test_correct']
        iteration_results['predicted'] += this_user['predicted']
        # unrated_predictions = clf.predict(v_books)
    # Per-iteration metrics over everything classified in this split.
    iteration_results['f1_scores'] = f1_score(y_true=iteration_results['correct'],
                                              y_pred=iteration_results['predicted'],
                                              average=None)
    iteration_results['confusion'] = confusion_matrix(y_true=iteration_results['correct'],
                                                      y_pred=iteration_results['predicted'])
    iteration_results['report'] = classification_report(y_true=iteration_results['correct'],
                                                        y_pred=iteration_results['predicted'])
    cross_validated['correct'].append(iteration_results['correct'])
    cross_validated['predicted'].append(iteration_results['predicted'])
    cross_validated['f1_scores'].append(iteration_results['f1_scores'])
    cross_validated['confusion'].append(iteration_results['confusion'])
    cross_validated['report'].append(iteration_results['report'])


all_correct = []
for correct in cross_validated['correct']:
    all_correct += correct

all_predicted = []
for prediction in cross_validated['predicted']:
    all_predicted += prediction

for f1score in cross_validated['report']:
   print f1score

print classification_report(y_true=all_correct, y_pred=all_predicted)

print str(datetime.datetime.now() - start_time)
