import sys, time, re, json
import math, collections, itertools
import sqlite3, spade, datetime

import nltk, nltk.classify.util, nltk.metrics
#from nltk.classify import NaiveBayesClassifier
#from nltk.metrics import BigramAssocMeasures
#from nltk.probability import FreqDist, ConditionalFreqDist
from nltk.corpus import wordnet, stopwords
from collections import Counter
from collections import defaultdict

NEG_PREFIX = "NEG_"


def merge_negation_counts(noun_counts):
    """Fold "NEG_"-prefixed word counts into their base words.

    Args:
        noun_counts: mapping of word -> count, where negated occurrences
            are stored under a "NEG_" prefix (e.g. "NEG_good").

    Returns:
        Counter mapping each base word to the combined count of its plain
        and negated forms. (Counter addition drops non-positive totals,
        matching the original `b + c` behavior.)
    """
    plain = Counter()
    negated = Counter()
    for word, count in noun_counts.items():
        if word.startswith(NEG_PREFIX):
            # BUG FIX: the original used word.strip("NEG_"), which removes
            # any of the characters N/E/G/_ from BOTH ends of the string
            # (e.g. "NEG_ENGINE" -> ""), not just the prefix. Slice off the
            # literal prefix instead.
            negated[word[len(NEG_PREFIX):]] = count
        else:
            plain[word] = count
    return plain + negated


def assign_topics(dictionary, num_seeds=10, threshold=0.4):
    """Cluster dictionary words under seed topics by WordNet similarity.

    Args:
        dictionary: Counter of word -> count.
        num_seeds: number of most-frequent words used as initial topics.
        threshold: minimum Wu-Palmer similarity to join an existing topic.

    Returns:
        defaultdict(list) mapping topic word -> list of member words.
        Words with no WordNet synsets are collected under "UNK"; words
        below the threshold start their own topic.
    """
    topics = defaultdict(list)

    # Seed one topic per most-frequent word.
    for seed, _ in dictionary.most_common(num_seeds):
        topics[seed].append(seed)

    for word in dictionary:
        wsyn = wordnet.synsets(word)
        if not wsyn:
            topics["UNK"].append(word)
            continue

        best_topic = word
        best_sim = 0
        for topic in topics:
            for aspect in topics[topic]:
                tsyn = wordnet.synsets(aspect)
                if tsyn:
                    sim = wsyn[0].wup_similarity(tsyn[0])
                    # wup_similarity may return None (no common ancestor in
                    # some NLTK versions); treat that as "no similarity"
                    # rather than raising TypeError on comparison.
                    if sim is None:
                        sim = -1
                else:
                    sim = -1
                if sim > best_sim:
                    best_sim = sim
                    best_topic = topic

        # BUG FIX: the original tested `similarity` here -- the value left
        # over from the LAST inner-loop iteration -- instead of the best
        # similarity found, making topic assignment depend on iteration
        # order. Compare the tracked best score.
        if best_sim > threshold:
            topics[best_topic].append(word)
        else:
            topics[word].append(word)

    return topics


if __name__ == "__main__":

    # Input: one JSON object per the original format; the 'NN' entry maps
    # (possibly NEG_-prefixed) nouns to counts.
    with open("dictionary.txt", 'r') as f:
        noun_counts = json.loads(f.readline())['NN']

    dictionary = merge_negation_counts(noun_counts)
    topics = assign_topics(dictionary)

    with open("results.txt", 'w') as f:
        f.write(json.dumps(topics))














