from sklearn.feature_extraction import DictVectorizer
# from ArtMAP.models import AdaptiveResonance
from ArtMAP.models import BookClusterer
from ArtMAP.models import UserClusterer
from collections import Counter, defaultdict
import json
# import random
import gzip
# from sklearn.cross_validation import train_test_split
__author__ = 'pstalidis'


# path1 = "/media/pstalidis/eecf9b83-9877-4f85-94b9-b0a4d07ade6b/home/radius/Downloads/ICT4Growth/ARTmap/"
# path2 = "/media/pstalidis/eecf9b83-9877-4f85-94b9-b0a4d07ade6b/home/radius/Downloads/ICT4Growth/BOOK_TF/"
path1 = "/home/panagiotis/Downloads/ICT4Growth/subset/"
path2 = "/home/panagiotis/Downloads/ICT4Growth/subset/BOOK_TF/"

# Read data from files:
#   userids.json     -> JSON list of user ids on a single line
#   bookids.json     -> JSON list of book ids on a single line
#   scorevectors.csv -> one tab-separated row of per-user scores per book
# 'with' blocks close the handles (the originals were leaked open).
with open(path1 + "userids.json") as f:
    userids = json.loads(f.readline())
with open(path1 + "bookids.json") as f:
    bookids = json.loads(f.readline())
# bookvec[i] = (book_id, [score-string per user]) -- rows pair up with bookids
with open(path1 + "scorevectors.csv") as f:
    bookvec = [(bid, line.strip().split("\t")) for bid, line in zip(bookids, f)]

# scores[user_id][book_id] = integer rating pulled from the book's score row.
# BUGFIX: the original loops ran xrange(0, len(x)-1), which silently dropped
# the last book and the last user; range(len(x)) covers every index.
scores = defaultdict(Counter)
for b in range(len(bookids)):
    for u in range(len(userids)):
        scores[userids[u]][bookids[b]] = int(bookvec[b][1][u])

# From scores create per-user rating dicts for the clusterer.
# NOTE: dict(scores[user]) snapshots the FULL counter (zero entries included)
# before "+= Counter()" rewrites the stored counter keeping only positive
# counts -- the ordering of these two lines is deliberate.
userids = scores.keys()
data = []
for user in userids:
    data.append(dict(scores[user]))
    scores[user] += Counter()

# One gzipped single-line JSON description file per book id.
# BUGFIX: the original leaked a file handle per book; 'with' closes each one.
bookdesc = []
for book in bookids:
    with gzip.open(path2 + book + ".json.gz", 'rb') as gz:
        bookdesc.append(json.loads(gz.readline()))


# uid_train, uid_test, uvec_train, uvec_test = train_test_split(userids, data, test_size=0.4, random_state=0)
# bid_train, bid_test, bvec_train, bvec_test = train_test_split(bookids, bookdesc, test_size=0.4, random_state=0)

# Cluster users by their rating dicts and books by their description dicts.
# UserClusterer / BookClusterer are project-local ArtMAP models (adaptive
# resonance, per the commented import above) -- internals not visible here.
ucls = UserClusterer()
# DictVectorizer turns the per-book description dicts into feature vectors.
bcls = BookClusterer(transform=DictVectorizer)

# NOTE(review): threshold=0.8 is presumably the ART vigilance parameter
# controlling cluster granularity -- confirm against UserClusterer.
ucls.fit_predict(userids, data, threshold=0.8)
bcls.fit(bookids, bookdesc)

# Pool every raw user->book score into its (user-cluster, book-cluster) cell;
# recall() maps an id to the cluster it was assigned during fitting.
tscores = defaultdict(lambda: defaultdict(list))
for u in userids:
    for b in bookids:
        tscores[ucls.recall(u)][bcls.recall(b)].append(scores[u][b])

# Reduce each pooled cell to its mean score.
# BUGFIX: under Python 2, sum(...)/len(...) on ints truncates the mean to an
# integer; float() keeps the fractional part of the average rating.
nscores = defaultdict(Counter)
for u in tscores.keys():
    for b in tscores[u].keys():
        nscores[u][b] = float(sum(tscores[u][b])) / len(tscores[u][b])

# release the (potentially large) intermediate pools
tscores = 0


# TODO: placeholder -- walks every (user-cluster, book-cluster) pair but does
# nothing yet; the recommendation logic over nscores[ucluster][bcluster]
# remains to be implemented.
for ucluster in nscores.keys():
    for bcluster in nscores[ucluster].keys():
        pass







"""
print "user clusters", len(set([ucls.recall(u) for u in userids]))
print "book clusters", len(set([bcls.recall(b) for b in bookids]))

print len(scores)

uid = userids[15]
cid = ucls.recall(uid)

original = ucls.transform(data[15])
returned = ucls.predict(data[15])
smth = ucls.inverse_transform(returned)[0]

nratings = Counter()
orat = Counter(smth)
for book in orat.keys():
    nratings[book] = round(orat[book], 1)
    print orat[book], "->", nratings[book]

print nratings.most_common(15)
print orat.most_common(15)
# print (original[0] - returned[0]).toarray()
print scores[userids[15]]['0460872702']
"""

