from sklearn.feature_extraction import DictVectorizer
from sklearn.neighbors import NearestCentroid
from sklearn import metrics
import datetime
# from ArtMAP.models import AdaptiveResonance
from ArtMAP.models import BookClusterer
from ArtMAP.models import UserClusterer
from collections import Counter, defaultdict
from scipy.sparse import hstack, vstack
import matplotlib.pyplot as plt
import json
import random
import gzip
from sklearn.cross_validation import train_test_split
__author__ = 'pstalidis'


# Alternate data locations (other machine) — switch by commenting.
# path1 = "/media/pstalidis/eecf9b83-9877-4f85-94b9-b0a4d07ade6b/home/radius/Downloads/ICT4Growth/ARTmap/"
# path2 = "/media/pstalidis/eecf9b83-9877-4f85-94b9-b0a4d07ade6b/home/radius/Downloads/ICT4Growth/BOOK_TF/"
path1 = "/home/panagiotis/Downloads/ICT4Growth/subset/"
path2 = "/home/panagiotis/Downloads/ICT4Growth/subset/BOOK_TF/"

# Read the user-id list, book-id list and per-book score rows from disk.
# Context managers close each handle deterministically (the original left
# all three files open for the life of the process).
with open(path1 + "userids.json") as f:
    user_ids = json.loads(f.readline())
with open(path1 + "bookids.json") as f:
    book_ids = json.loads(f.readline())
with open(path1 + "scorevectors.csv") as f:
    # One tab-separated row of raw scores per book, aligned with book_ids.
    bookvec = zip(book_ids, [line.strip().split("\t") for line in f])

# Quantize raw ratings into three classes and pivot to
# book_id -> Counter(user_id -> quantized score):
#   1  = liked   (raw score > 3)
#   0  = unrated (raw score == 0)
#  -1  = disliked (everything else, i.e. 1..3)
reversed_scores = defaultdict(Counter)
for book_id, row in bookvec:
    for user_id, raw in zip(user_ids, row):
        raw = int(raw)  # parse once (the original parsed each cell twice)
        if raw > 3:
            reversed_scores[book_id][user_id] = 1
        elif raw == 0:
            reversed_scores[book_id][user_id] = 0
        else:
            reversed_scores[book_id][user_id] = -1

# Hide some books: random 70/30 split of the rated books into train/test.
temp = list(reversed_scores.keys())  # explicit copy; also safe on py3 views
random.shuffle(temp)
cutoff = int(len(temp) * 0.7)  # hoisted so both slices agree by construction
train_book_ids = temp[:cutoff]
test_book_ids = temp[cutoff:]
# train_book_ids, test_book_ids = train_test_split(book_ids, test_size=0.4, random_state=0)

# Re-pivot the book-keyed scores to user_id -> Counter(book_id -> score),
# restricted to the training books.
train_scores = defaultdict(Counter)
for book in train_book_ids:
    for user, score in reversed_scores[book].items():
        train_scores[user][book] = score

# Same pivot for the held-out (test) books.
test_scores = defaultdict(Counter)
for book in test_book_ids:
    for user, score in reversed_scores[book].items():
        test_scores[user][book] = score

# from scores create vectors
# Snapshot each training user's scores as a plain dict; user_vectors is
# aligned index-for-index with train_user_ids.
train_user_ids = train_scores.keys()
user_vectors = []
for user in train_user_ids:
    user_vectors.append(dict(train_scores[user]))
    # NOTE(review): `counter += Counter()` discards every non-positive
    # entry, so the -1 (disliked) and 0 (unrated) scores are stripped from
    # train_scores here (the user_vectors snapshot above keeps them).
    # Later y_train lookups will then read 0 for disliked books — confirm
    # this pruning is intentional.
    train_scores[user] += Counter()

# Load every book's gzipped JSON description; the list is aligned
# index-for-index with book_ids. A context manager closes each gzip
# handle promptly (the original leaked one open handle per book).
book_descriptions = []
for book in book_ids:
    with gzip.open(path2 + book + ".json.gz", 'rb') as gz:
        book_descriptions.append(json.loads(gz.readline()))


ucls = UserClusterer()
ucls.fit(train_user_ids, user_vectors, threshold=1.0)

bcls = BookClusterer(transform=DictVectorizer)
bcls.fit(book_ids, book_descriptions, threshold=1.0)

print "user clustering done with 0.8 threshold and book clustering done with 0.9"

# create test and training datasets for rocchio
t0 = datetime.datetime.now()
X_train = []
y_train = []

for user in train_user_ids:
    uvector = ucls.predict(user_vectors[train_user_ids.index(user)])
    # print uvector
    for book in train_book_ids:
        bvector = bcls.predict(book_descriptions[train_book_ids.index(book)])
        # print bvector
        tt = hstack((uvector, bvector))
        # print tt
        X_train.append(tt)
        y_train.append(train_scores[user][book])

print "created vectors for rocchio training in:", datetime.datetime.now() - t0

t0 = datetime.datetime.now()
X_test = []
y_test = []
for user in train_user_ids:
    uvector = ucls.predict(user_vectors[train_user_ids.index(user)])
    for book in test_book_ids:
        bvector = bcls.predict(book_descriptions[test_book_ids.index(book)])
        X_test.append(hstack((uvector, bvector)))
        y_test.append(test_scores[user][book])

print "created vectors for rocchio testing in:", datetime.datetime.now() - t0

# Stack the per-(user, book) sparse rows into single sparse matrices.
X_train = vstack(X_train)
# y_train = vstack(y_train)
X_test = vstack(X_test)
# y_test = vstack(y_test)

# perform rocchio classification!
# NearestCentroid with cosine distance is the Rocchio classifier.
t0 = datetime.datetime.now()
clf = NearestCentroid(metric='cosine', shrink_threshold=None)
clf.fit(X_train, y_train)
print "rocchio training took:", datetime.datetime.now() - t0

# Evaluate on the held-out books: confusion matrix over the three
# quantized labels (0 = unrated, -1 = disliked, 1 = liked), then plot it.
predicted = clf.predict(X_test)
cm = metrics.confusion_matrix(y_true=y_test, y_pred=predicted, labels=[0, -1, 1])
print cm
plt.matshow(cm)
plt.title('Confusion matrix')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()