import numpy
from collections import defaultdict, Counter
__author__ = 'panagiotis'


class NNMF():
    """Skeleton of a non-negative matrix factorization model.

    Only stores hyper-parameters and can initialise the two factor
    matrices; no fitting logic is implemented in this class.
    """

    def __init__(self, max_iterations=200, alpha=0.01, beta=0.001, tolerance=1e-4):
        # Hyper-parameters, stored under the names the class uses internally.
        self.max_iter = max_iterations
        self.alpha = alpha
        self.beta = beta
        self.e_tol = tolerance

    def __starting(self, shape, components, type="random"):
        """Initialise the factors: products (N x C) and components (C x M).

        `type` selects the initialiser; an unrecognised value leaves the
        factor attributes untouched (mirroring the original behaviour).
        """
        self.N, self.M = shape[0], shape[1]
        self.C = components
        if type == "random":
            init = numpy.random.random
        elif type == "ones":
            init = numpy.ones
        else:
            return  # unknown init type: do nothing, as before
        self._products = init((self.N, self.C))
        self._components = init((self.C, self.M))



class NMF():
    """Matrix factorization R ~ P.Q fitted by stochastic gradient descent.

    Only strictly positive entries of R are treated as observed; zeros are
    skipped during fitting, which makes this usable on sparse rating
    matrices where 0 means "unrated".

    Parameters
    ----------
    n_components : int or None
        Number of latent factors K (0 when not given).
    max_iter : int
        Maximum number of SGD passes over the observed entries.
    beta : float
        L2 regularization strength on the factor entries.
    tol : float
        Convergence threshold on the regularized squared error.
    random_state : int or None
        Seed for the random initialisation of the component matrix Q.
    alpha : float
        SGD learning rate.  Default 0.0002 matches the step size the
        original implementation effectively used.
    """

    def __init__(self, n_components=None, max_iter=5000, beta=0.02, tol=0.0002,
                 random_state=None, alpha=0.0002):
        self.n_components = n_components if n_components else 0
        self.max_iter = max_iter
        self.alpha = alpha          # learning rate (was conflated with tol)
        self.beta = beta            # L2 penalty weight
        self.tol = tol              # convergence threshold on the loss
        self.state = random_state   # seed for Q's random initialisation
        self.components_ = None     # Q (K x n_features), set by fit_transform
        self.products_ = None       # P (n_samples x K), set by fit_transform
        self.reconstruction_error_ = 0

    def _sgd_step(self, R, P, Q, K, alpha, beta):
        """One regularized SGD pass over the observed (positive) entries of R.

        Updates P and Q in place.  P[i][k] is updated before it is read by
        the Q update, preserving the original update order exactly.
        """
        for i in range(len(R)):
            for j in range(len(R[i])):
                if R[i][j] > 0:
                    eij = R[i][j] - numpy.dot(P[i, :], Q[:, j])
                    for k in range(K):
                        P[i][k] += alpha * (2 * eij * Q[k][j] - beta * P[i][k])
                        Q[k][j] += alpha * (2 * eij * P[i][k] - beta * Q[k][j])

    def _loss(self, R, P, Q, K, beta):
        """Squared error over observed entries plus the L2 penalty term."""
        e = 0.0
        for i in range(len(R)):
            for j in range(len(R[i])):
                if R[i][j] > 0:
                    e += pow(R[i][j] - numpy.dot(P[i, :], Q[:, j]), 2)
                    for k in range(K):
                        e += (beta / 2) * (pow(P[i][k], 2) + pow(Q[k][j], 2))
        return e

    def matrix_factorization(self, R, P, Q, components=None, steps=5000,
                             alpha=0.0002, beta=0.02):
        """Factorize R into the given P (n x K) and Q (K x m) in place.

        Returns (P, Q.T).  `components` defaults to 5 when not given.
        Stops early once the loss drops below 0.001.
        """
        K = components if components else 5
        for step in range(steps):
            self._sgd_step(R, P, Q, K, alpha, beta)
            e = self._loss(R, P, Q, K, beta)
            print("%s %s" % (e, step))
            if e < 0.001:
                break
        return P, Q.T

    def fit_transform(self, X):
        """Fit the factorization to X and return the sample factors P.

        Also stores Q in `components_` and the final loss in
        `reconstruction_error_`.
        """
        if not hasattr(X, "shape"):
            X = numpy.array(X)
        K = self.n_components
        # BUG FIX: honor random_state (it was stored but never used), so
        # fits are reproducible for a given seed.
        rng = numpy.random.RandomState(self.state)
        self.products_ = numpy.ones((X.shape[0], K))
        self.components_ = rng.random_sample((K, X.shape[1]))

        R, P, Q = X, self.products_, self.components_

        e = 0.0  # defined up front so max_iter=0 cannot leave it unbound
        for step in range(self.max_iter):
            print("progress %s" % (100.0 * step / self.max_iter))
            # BUG FIX: the step size used to be self.tol (a tolerance);
            # use the dedicated alpha learning rate instead.
            self._sgd_step(R, P, Q, K, self.alpha, self.beta)
            e = self._loss(R, P, Q, K, self.beta)
            print("%s %s" % (e, step))
            # BUG FIX: converge against the configured tolerance rather
            # than a hard-coded 0.001.
            if e < self.tol:
                print("reached e")
                break
        self.components_ = Q
        self.reconstruction_error_ = e
        return P


# R = [[5, 3, 0, 1],
#      [4, 0, 0, 1],
#      [1, 1, 0, 5],
#      [1, 0, 0, 4],
#      [0, 1, 5, 4]]
#
# ratings = numpy.array(R)
# nmf = NMF(2)
# nP = nmf.fit_transform(ratings)
# nQ = nmf.components_
#
# nR = numpy.dot(nP, nQ)


# --- Data loading -----------------------------------------------------------
path = "./dataset/"
dataset = "u116_b110"

# Lookup table for numpy.choose below; identity mapping over ratings 0..5.
choices = [0, 1, 2, 3, 4, 5]

# Each line of scorevectors.csv is one user's tab-separated rating vector;
# values like "4.0" are parsed via float then truncated to int.
v_users = [[int(float(w)) for w in row] for row in [line.strip().split("\t")
                                                    for line in open(path+dataset+"/scorevectors.csv", 'rb')]]
# choose() maps each rating r to choices[r]; with this identity table the
# values pass through unchanged (but out-of-range ratings would raise).
t_users = numpy.choose(v_users, choices)
rows = len(v_users)
cols = len(v_users[0])

# ratings[user][book] -> rating, as a dict of Counters.
ratings = defaultdict(Counter)

for row in xrange(0, rows):
    for col in xrange(0, cols):
        ratings[row][col] = t_users[row][col]
    # Counter.__iadd__ with an empty Counter drops non-positive counts,
    # i.e. this strips the zero entries (unrated books) from each user.
    ratings[row] += Counter()

all_data = []

from sklearn.cross_validation import train_test_split

# Flatten the observed ratings into (user, book, rating) triples.
for user in ratings.keys():
    for book in ratings[user].keys():
        all_data.append((user, book, ratings[user][book]))

# NOTE(review): train_size=0.3 puts only 30% of the observed ratings in the
# training split — presumably deliberate, but worth confirming.
training_set, testing_set = train_test_split(all_data, train_size=0.3, random_state=0)

training = defaultdict(Counter)
testing = defaultdict(Counter)  # NOTE(review): never populated below; looks dead

for item in training_set:
    training[item[0]][item[1]] = item[2]

# Rebuild the training ratings as a dense rows x cols matrix; books not in
# the training split default to 0 (Counter missing-key), i.e. "unobserved".
training_set = []
for user in xrange(0, rows):
    vector = []
    for book in xrange(0, cols):
        vector.append(training[user][book])
    training_set.append(vector)

from datetime import datetime

t0 = datetime.now()

# Factorize the training matrix with 2 latent components.
nmf = NMF(2)
nP = nmf.fit_transform(training_set)
nQ = nmf.components_

print "training completed in", datetime.now() - t0

# Reconstruct the full rating matrix from the learned factors.
nR = numpy.dot(nP, nQ)

correct = []
predicted = []

# Score each held-out (user, book, rating) triple against the value the
# reconstruction predicts for that user/book cell.
for item in testing_set:
    correct.append(item[2])
    predicted.append(nR[item[0]][item[1]])

from sklearn.metrics import classification_report, mean_absolute_error, mean_squared_error

# Report mean absolute error of predicted vs. true held-out ratings.
print mean_absolute_error(correct, predicted)

