import sys

# Path hacks so gensim/jieba resolve on the deployment box and on the
# author's local machine; entries that don't exist are simply ignored.
sys.path.append("../")
sys.path.append("/usr/lib/python3/dist-packages")
sys.path.append("/usr/local/lib/python3.5/dist-packages")
sys.path.append('/Users/xuchaosheng/anaconda3/lib/python3.6/site-packages')

from gensim import corpora, models, similarities
from pymongo import MongoClient
from gensim import corpora
from pprint import pprint

from six import iteritems

import jieba.posseg as pseg
import logging
import os

# Surface gensim's training progress (it logs at INFO level).
logging.basicConfig(format = '%(asctime)s : %(levelname)s : %(message)s', level = logging.INFO)

# Remote MongoDB holding the posts; the commented-out line is the local dev host.
client = MongoClient('47.104.130.19', 27017)
# client = MongoClient('127.0.0.1', 27017)

knx_posts_db = client['knx_posts_db']

# NOTE(review): offical_posts_coll is never used below — presumably the
# source the 'offical_train' files were exported from; confirm.
offical_posts_coll = knx_posts_db['offical_posts_coll']
platform_posts = knx_posts_db['platform_posts']

# Load the stopword list, one word per line.  Read as UTF-8 explicitly for
# consistency with the training-file reads below (the bare open() default
# is locale-dependent).
with open('/Users/xuchaosheng/Workspace/KNX/stopWords.txt', encoding = 'utf-8') as f:
    stoplist = [line.strip() for line in f]

# Training file names (skip macOS Finder metadata).  Listed once, so
# tag_list/posts/texts are guaranteed to be index-aligned; the original
# called os.listdir() three times and read every file twice.
_train_files = [file for file in os.listdir('offical_train') if file != '.DS_Store']

# Each file is named "<tag>-...": the prefix before '-' is its category label.
tag_list = [file.split('-')[0] for file in _train_files]

# Read each training document exactly once, closing the file handles
# (the original leaked them via open(...).read()).
posts = []
for file in _train_files:
    with open('offical_train/' + file, encoding = 'utf-8') as fp:
        posts.append(fp.read())

# Tokenise with jieba POS tagging; keep verbs ('v'), nouns ('n') and
# verbal nouns ('vn') that are not stopwords.
texts = [
    [word for word, flag in pseg.cut(post) if word not in stoplist and flag in ['v', 'n', 'vn']]
    for post in posts
]

# Token -> id mapping over the tokenised training documents.
dictionary = corpora.Dictionary(texts)

# Drop stopwords that survived tokenisation, plus tokens appearing in only
# one document (document frequency 1) — neither carries discriminative weight.
# dict.items() replaces six.iteritems: this file is Python 3 only.
stop_ids = [dictionary.token2id[stopword] for stopword in stoplist if stopword in dictionary.token2id]
once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]
dictionary.filter_tokens(stop_ids + once_ids)
dictionary.compactify()  # reassign ids to close the gaps left by filtering

# Bag-of-words vector for every training document.
corpus = [dictionary.doc2bow(text) for text in texts]

# TF-IDF weighting, then an LSI (latent semantic indexing) projection.
tfidf = models.TfidfModel(corpus)

corpus_tfidf = tfidf[corpus]
lsi = models.LsiModel(corpus_tfidf, id2word = dictionary)
corpus_lsi = lsi[corpus_tfidf]

# Similarity index over the training documents.
# NOTE(review): the index is built from lsi[corpus] (raw BoW through LSI),
# not lsi[corpus_tfidf]; the query path below also feeds raw BoW into lsi,
# so the two sides are mutually consistent, but corpus_lsi ends up unused —
# confirm whether TF-IDF was meant to be in the loop.
index = similarities.MatrixSimilarity(lsi[corpus])


def answer(result):
    """Majority vote over the tag labels in *result*.

    Returns 'f', 'm' or 'b' — whichever occurs most often.  Ties are
    broken in the fixed priority order 'f' > 'm' > 'b', exactly as the
    cascaded >=-comparisons did.
    """
    # max() returns the FIRST maximal candidate, so iterating in
    # ('f', 'm', 'b') order encodes the tie-breaking priority.
    return max(('f', 'm', 'b'), key = result.count)


# Classify every not-yet-labelled post in each platform collection.
collections = [platform_posts]

for collection in collections:
    # no_cursor_timeout: the per-document work (jieba + LSI projection) can
    # outlast the server's idle-cursor timeout, so keep the cursor alive and
    # close it explicitly once the collection is done.
    cursor = collection.find({}, no_cursor_timeout = True)

    for jd in cursor:
        # Skip documents already classified on a previous run.
        if 'divide' in jd:
            continue

        new_doc = jd['description']

        # Tokenise the query document and project it into the LSI space.
        # NOTE(review): the flag filter here is ['v', 'n'] while training
        # used ['v', 'n', 'vn'] — confirm whether the mismatch is intended.
        vec_bow = dictionary.doc2bow([word for word, flag in pseg.cut(new_doc) if flag in ['v', 'n'] and word not in stoplist])
        vec_lsi = lsi[vec_bow]

        # Ten most similar training documents, best match first.
        sims = index[vec_lsi]
        sims = sorted(enumerate(sims), key = lambda item: -item[1])[:10]

        # Tags of those neighbours; majority vote decides the label.
        result = [tag_list[doc_idx] for doc_idx, _score in sims]
        ans = answer(result)

        # update_one replaces Collection.update, which is deprecated and
        # removed in pymongo 4.x; same single-document $set semantics.
        collection.update_one({'_id': jd['_id']}, {'$set': {'divide': ans}})
        print(jd['name'], ans)

    cursor.close()
