from sklearn.feature_extraction import DictVectorizer
from sklearn.neighbors import NearestCentroid
from sklearn import metrics
import datetime
# from ArtMAP.models import AdaptiveResonance
from ArtMAP.models import BookClusterer
from ArtMAP.models import UserClusterer
from collections import Counter, defaultdict
from scipy.sparse import hstack, vstack
import matplotlib.pyplot as plt
import json
import random
import gzip
from sklearn.cross_validation import train_test_split
__author__ = 'pstalidis'


# alternate data locations (other machine), kept for reference:
# path1 = "/media/pstalidis/eecf9b83-9877-4f85-94b9-b0a4d07ade6b/home/radius/Downloads/ICT4Growth/ARTmap/"
# path2 = "/media/pstalidis/eecf9b83-9877-4f85-94b9-b0a4d07ade6b/home/radius/Downloads/ICT4Growth/BOOK_TF/"
path1 = "/home/panagiotis/Downloads/ICT4Growth/subset/"
path2 = "/home/panagiotis/Downloads/ICT4Growth/subset/BOOK_TF/"

# read data from files; with-blocks close the handles (the originals were
# opened inline and leaked)
with open(path1 + "userids.json") as id_file:
    user_ids = json.loads(id_file.readline())
with open(path1 + "bookids.json") as id_file:
    book_ids = json.loads(id_file.readline())
# bookvec pairs each book id with its tab-separated per-user score vector;
# rows are aligned one-for-one with book_ids
with open(path1 + "scorevectors.csv") as score_vectors:
    bookvec = zip(book_ids, [line.strip().split("\t") for line in score_vectors])

# reversed_scores[book_id][user_id] -> "POS"/"NEG" label derived from the
# raw integer rating: ratings above 3 are positive, everything else
# (including 0 = unrated) is negative.
reversed_scores = defaultdict(Counter)
# mlabels = [0, 1, 2, 3, 4, 5]
mlabels = ["UNK", "NEG", "POS"]  # "UNK" only appears for untrained models later
for book_id, score_row in bookvec:
    for user_id, raw_score in zip(user_ids, score_row):
        # convert once instead of once per comparison
        reversed_scores[book_id][user_id] = "POS" if int(raw_score) > 3 else "NEG"

# hide some books from training (70/30 split, fixed seed for reproducibility)
train_book_ids, test_book_ids = train_test_split(reversed_scores.keys(), test_size=0.3, random_state=0)


def _scores_by_user(book_subset):
    """Pivot book->user ratings into user->book ratings for the given books."""
    by_user = defaultdict(Counter)
    for book in book_subset:
        for user, rating in reversed_scores[book].items():
            by_user[user][book] = rating
    return by_user

train_scores = _scores_by_user(train_book_ids)
test_scores = _scores_by_user(test_book_ids)

# from scores create vectors
train_user_ids = train_scores.keys()
# one plain-dict rating vector per training user, aligned with train_user_ids
user_vectors = [dict(train_scores[user]) for user in train_user_ids]

# one gzipped-JSON description per book, aligned row-for-row with book_ids
book_descriptions = []
for book in book_ids:
    # with-block closes the gzip handle (the original leaked one per book)
    with gzip.open(path2 + book + ".json.gz", 'rb') as desc_file:
        book_descriptions.append(json.loads(desc_file.readline()))

# Clustering thresholds for the ART-based clusterers. Presumably these act
# like ART vigilance parameters (higher -> more, tighter clusters) --
# semantics live in ArtMAP.models; TODO confirm valid ranges.
uth = 0.7
bth = 0.9
# Cluster training users by their {book_id: "POS"/"NEG"} rating dicts.
ucls = UserClusterer()
ucls.fit(train_user_ids, user_vectors, threshold=uth)

# Cluster books by their description dicts; DictVectorizer is passed in so
# the clusterer can turn the dicts into feature vectors first.
bcls = BookClusterer(transform=DictVectorizer)
bcls.fit(book_ids, book_descriptions, threshold=bth)

print "user clustering done with", uth, "threshold and book clustering done with", bth

t0 = datetime.datetime.now()

# for each user cluster create a separate rocchio classifier and train it with positive and negative samples
classifiers = dict()
for user in train_user_ids:
    # cluster_id = ucls.recall(user)  # perhaps instead of using the id i should use the vector here!
    cluster_id = ucls.find_cluster(user_vectors[train_user_ids.index(user)])
    X_train = []
    y_train = []
    for book in train_book_ids:
        rating = train_scores[user][book]
        # if rating != "UNK":
        if True:
            X_train.append(bcls.predict(book_descriptions[train_book_ids.index(book)]))
            y_train.append(rating)
    clf = NearestCentroid(metric='cosine', shrink_threshold=None)
    try:
        clf.fit(vstack(X_train), y_train)
    except ValueError:
        print "ValueError: y has less than 2 classes"
    except AttributeError:
        print "AttributeError: Model can not be trained."
    classifiers[cluster_id] = clf

print "rocchio training took:", datetime.datetime.now() - t0
t0 = datetime.datetime.now()

# for each user call appropriate cluster rocchio and get recommendation
results = dict()
for user in train_user_ids:
    # cluster_id = ucls.recall(user)  # perhaps instead of using the id I should use the vector here!
    cluster_id = ucls.find_cluster(user_vectors[train_user_ids.index(user)])
    X_test = []
    y_test = []
    for book in test_book_ids:
        X_test.append(bcls.predict(book_descriptions[test_book_ids.index(book)]))
        y_test.append(test_scores[user][book])
    try:
        predicted = classifiers[cluster_id].predict(vstack(X_test))
    except AttributeError:
        print "AttributeError: Model has not been trained yet."
        predicted = ["UNK" for i in y_test]
    results[user] = (y_test, predicted)

print "testing took:", datetime.datetime.now() - t0
print "now printing results per user"

# show results per user and then the totals
total_true = []
total_pred = []
for user in results.keys():
    y_test, predicted = results[user]
    for t in y_test:
        total_true.append(t)
    for p in predicted:
        total_pred.append(p)
    # cm = metrics.confusion_matrix(y_true=y_test, y_pred=predicted, labels=mlabels)
    # print user, cm
    # plt.matshow(cm)
    # plt.title('Confusion matrix')
    # plt.colorbar()
    # plt.ylabel('True label')
    # plt.xlabel('Predicted label')
    # plt.show()

cm = metrics.confusion_matrix(y_true=total_true, y_pred=total_pred, labels=mlabels)
print "total results", cm
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()