import os
from random import shuffle, seed
import json
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
import hashlib
import pickle
from utils import Config, safe_pickle_dump

def encode_dict(d):
  """Recursively deep-copy a JSON-like structure.

  Dicts and lists are rebuilt node by node; every other value
  (str, int, float, bool, None, ...) is returned unchanged.

  Args:
    d: any nested combination of dict, list and scalar values.

  Returns:
    A structurally identical copy of `d`: containers are new objects,
    scalar leaves are shared with the input.
  """
  if isinstance(d, dict):
    return {k: encode_dict(v) for k, v in d.items()}
  elif isinstance(d, list):
    return [encode_dict(x) for x in d]
  else:
    return d

seed(1024)  # fixed seed so the key shuffle below is reproducible across runs

# One-off database builder, kept for reference but disabled: it parsed
# Config.raw_news_path line by line as JSON, dropped articles whose content
# was shorter than 50 characters, keyed each record by the md5 of its title
# and pickled the resulting dict to Config.db_path.
'''
db = open(Config.raw_news_path, 'rb').readlines()

new_db = {}
cnt = 0
for line in db:
	try:
		#print line
		data = json.loads(line)
		if len(data['content']) < 50:
			continue
		hash_md5 = hashlib.md5(data['title'].encode('utf-8'))
		new_db[hash_md5.hexdigest()] = encode_dict(data)
		cnt += 1
		#print cnt
	except:
		pass


safe_pickle_dump(new_db, Config.db_path)
'''

# Load the prebuilt {md5(title): article_dict} database.
# NOTE(review): unpickling executes arbitrary code -- only load db files
# produced by this pipeline, never untrusted input.
with open(Config.db_path, 'rb') as f:  # `with` closes the handle (original leaked it)
  db = pickle.load(f)
max_train = 20000     # cap on documents used to fit the vectorizer
max_features = 5000   # TF-IDF vocabulary size

def make_corpus(keys):
	"""Lazily yield db[key]['content'] for each key, in order.

	Streams one article text at a time so the full corpus is never
	materialized in memory at once.
	"""
	return (db[k]['content'] for k in keys)

# Shuffle all article ids and fit the vectorizer on at most `max_train` of them.
keys = list(db.keys())  # list(): dict.keys() is a non-shuffleable view in Python 3

shuffle(keys)
train_keys = keys[:min(len(keys), max_train)]

print("train on %d news" % (len(train_keys)))

# Sub-linear TF, smoothed IDF, L2-normalized rows: standard TF-IDF setup.
v = TfidfVectorizer(max_features=max_features, norm='l2', use_idf=True,
                    smooth_idf=True, sublinear_tf=True, max_df=1.0, min_df=1)
v.fit(make_corpus(train_keys))

print("transforming %d documents" % (len(keys)))

# Project every document (not just the training subset) into TF-IDF space.
X = v.transform(make_corpus(keys))


# print() call: the original Python 2 `print expr` statement is a syntax
# error under Python 3, which the rest of the file otherwise targets.
print(len(v.get_feature_names()))

# write full matrix out -- this is the heavy artifact (sparse doc-term matrix)
out = {'X': X}
print("writing", Config.tfidf_path)
safe_pickle_dump(out, Config.tfidf_path)

# writing lighter metadata information into a separate (smaller) file
out = {
  'vocab': v.vocabulary_,
  'idf': v._tfidf.idf_,  # NOTE(review): private attribute; `v.idf_` is the public accessor -- confirm before switching
  'pids': keys,          # document ids, same order as the rows of X
}
print("writing", Config.meta_path)
safe_pickle_dump(out, Config.meta_path)


print(X.shape)  # print() call -- Python 2 statement form is a syntax error in Python 3
print("precomputing nearest neighbor queries in batches...")
X = X.todense()  # densify; rows are L2-normalized, so dot product == cosine similarity
sim_dict = {}
batch_size = 200  # queries per batch, bounds the NxB score matrix size
for i in range(0, len(keys), batch_size):
	i1 = min(len(keys), i + batch_size)
	xquery = X[i:i1]  # BxD batch of query rows
	# Negate so that argsort (ascending) ranks the most similar documents first.
	ds = -np.asarray(np.dot(X, xquery.T))  # NxD * DxB => NxB
	IX = np.argsort(ds, axis=0)  # NxB: row indices ordered by descending similarity
	for j in range(i1 - i):
		# top 50 neighbors per query (presumably includes the query itself at rank 0,
		# since self-similarity is maximal -- verify downstream consumers expect that)
		sim_dict[keys[i + j]] = [keys[q] for q in list(IX[:50, j])]
	print('%d/%d...' % (i, len(keys)))

print("writing", Config.sim_path)
safe_pickle_dump(sim_dict, Config.sim_path)
