import numpy
from collections import defaultdict
from sklearn.cross_validation import train_test_split
__author__ = 'panagiotis'


# Map raw integer scores (0-5) to coarse rating labels:
#   0 -> "Z" (zero / no rating), 1-3 -> "N", 4-5 -> "P"
choices = ["Z", "N", "N", "N", "P", "P"]
classes = sorted(list(set(choices)))  # deterministic label order: ["N", "P", "Z"]

# Load per-user score vectors (tab-separated numbers, one user per row).
# `with` guarantees the handle is closed even if parsing raises (the original
# left both files open for the lifetime of the process).
with open("scorevectors.csv", 'rb') as score_file:
    v_users = [[int(float(w)) for w in line.strip().split("\t")] for line in score_file]
# numpy.choose maps each integer score s to choices[s], producing a
# (n_users, n_books) array of label strings.
t_users = numpy.choose(v_users, choices)

# Load per-book weight vectors (tab-separated floats, one book per row).
with open("weightvectors.csv", 'rb') as weight_file:
    v_books = [[float(w) for w in line.strip().split("\t")] for line in weight_file]

# Sparse ratings table: ratings[user][book] = label, keeping only entries
# whose label differs from choices[0] ("Z", the no-rating marker).
ratings = defaultdict(lambda: defaultdict(int))
for user_idx, row_labels in enumerate(t_users):
    for book_idx, label in enumerate(row_labels):
        if label != choices[0]:
            ratings[user_idx][book_idx] = label

# Flatten the sparse ratings into three aligned lists of
# (user vector, book vector, label) training triples.
all_user_vectors = []
all_book_vectors = []
all_results = []

for user_idx, book_labels in ratings.items():
    for book_idx, label in book_labels.items():
        all_user_vectors.append(v_users[user_idx])
        all_book_vectors.append(v_books[book_idx])
        all_results.append(label)

# 70/30 train/test split over the three parallel lists; fixed seed so the
# split is reproducible across runs.
split = train_test_split(all_user_vectors, all_book_vectors, all_results,
                         random_state=0, test_size=0.3)
(train_user_vectors, test_user_vectors,
 train_book_vectors, test_book_vectors,
 train_results, test_results) = split

# Concatenate the user and book feature vectors side by side into a single
# design matrix (axis=1 concatenation is equivalent to hstack for 2-D input).
train_complex = numpy.concatenate((train_user_vectors, train_book_vectors), axis=1)
test_complex = numpy.concatenate((test_user_vectors, test_book_vectors), axis=1)

from sklearn.naive_bayes import MultinomialNB

# Fit a multinomial Naive Bayes classifier on the concatenated user+book
# features (fit returns the estimator itself, so the call chains).
clf = MultinomialNB().fit(train_complex, numpy.array(train_results))

# Predicted labels for the held-out 30% split.
res = clf.predict(test_complex)

from sklearn.metrics import confusion_matrix, f1_score

# Confusion matrix with rows/columns ordered as `classes`.
cm = confusion_matrix(test_results, res, labels=classes)
print(cm)
# Per-class F1 scores (average=None yields one score per label).
print(f1_score(y_true=test_results, y_pred=res, average=None))

