import fileModel


def print_word_idf(path):
    """Print document-frequency counts for every word in the file at *path*.

    Each word is counted at most once per line (document), so the printed
    value is the number of documents containing the word.  Pairs are
    printed as (word, doc_count) tuples, most frequent first.
    """
    doc_freq = {}
    for document in fileModel.read_data(path):
        seen = set()
        for token in document:
            if token not in seen:
                seen.add(token)
                doc_freq[token] = doc_freq.get(token, 0) + 1

    for pair in sorted(doc_freq.items(), key=lambda kv: kv[1], reverse=True):
        print(pair)


def print_word_count(path, ifprint=True):
    """Tally how many times each word occurs in the data file at *path*.

    When *ifprint* is true, (word, count) tuples are printed most
    frequent first.  The raw {word: count} dict is returned either way.
    """
    totals = {}
    for document in fileModel.read_data(path):
        for token in document:
            totals[token] = totals.get(token, 0) + 1

    if ifprint:
        ranked = sorted(totals.items(), key=lambda kv: kv[1], reverse=True)
        for pair in ranked:
            print(pair)
    return totals


def print_word_in_topic(data_path, phi_path, wordmap_path):
    """Print, for each word, the topics in which it has non-minimal weight.

    Every topic row of the phi matrix has its minimum value (the model's
    uniform "background" weight) zeroed out; a word is then considered
    present in a topic when its remaining weight is > 0.  Output columns:
    word id, word, corpus frequency, topic count, and the topic-id list.

    Assumes phi rows all have the same length (topics x vocabulary) --
    matches the wordmap read from *wordmap_path*.
    """
    word_count = print_word_count(data_path, False)

    phi = fileModel.read_float_data(phi_path)
    if not phi:
        # Nothing to report for an empty phi matrix.
        return

    for topic_row in phi:
        if not topic_row:
            continue
        # Use the true row minimum instead of the old hard-coded sentinel
        # (`min_value = 100`), which silently failed if every weight
        # exceeded 100 and zeroed real weights equal to exactly 100.
        floor = min(topic_row)
        for i, value in enumerate(topic_row):
            if value == floor:
                topic_row[i] = 0

    wordmap, id2word = fileModel.read_wordMap_file(wordmap_path)

    word_in_topic = {}
    for word_id in range(len(phi[0])):
        for topic_id in range(len(phi)):
            if phi[topic_id][word_id] > 0:
                word_in_topic.setdefault(word_id, []).append(topic_id)

    # Words that appear in the most topics come first.
    ranked = sorted(word_in_topic.items(), key=lambda d: len(d[1]), reverse=True)

    for word_id, topics in ranked:
        word = id2word[word_id]
        print("%5d %15s %4d %2d %s" % (
            word_id, word, word_count[word], len(topics),
            '[' + ' '.join([str(item) for item in topics]) + ']'))


def print_topic_in_doc(theta_path, wordmap_path, data_path):
    """Print, for each document, the topics with non-minimal weight.

    Mirrors print_word_in_topic: each document's minimum theta value is
    zeroed out, and the surviving topic ids are listed.  Output columns:
    doc id, doc length in words, topic count, then the topic-id list.

    *wordmap_path* is accepted for signature compatibility but unused.
    """
    raw_data = fileModel.read_data(data_path)
    theta = fileModel.read_float_data(theta_path)

    for doc_row in theta:
        if not doc_row:
            continue
        # True row minimum instead of the fragile `min_value = 100`
        # sentinel, which broke for weights >= 100 and empty rows.
        floor = min(doc_row)
        for i, value in enumerate(doc_row):
            if value == floor:
                doc_row[i] = 0

    topic_in_doc = {}
    for doc_id, doc_row in enumerate(theta):
        for topic_id, weight in enumerate(doc_row):
            if weight > 0:
                # Topic ids are kept as strings so the printed list
                # matches the original ['0', '3', ...] format.
                topic_in_doc.setdefault(doc_id, []).append(str(topic_id))

    # Documents that span the most topics come first.
    ranked = sorted(topic_in_doc.items(), key=lambda d: len(d[1]), reverse=True)

    for doc_id, topics in ranked:
        print("%5d %3d %3d" % (doc_id, len(raw_data[doc_id]), len(topics)), end=' ')
        print(topics)


if __name__ == "__main__":
    # Ad-hoc driver: point `root` at a local corpus directory.
    # NOTE(review): hard-coded Windows path — adjust before running elsewhere.
    root = "D:/javaEE/MLDA/data4/news/"
    # print_word_count(root + "news.data")
    # Model output directory; name presumably encodes alpha=0.1, beta=0.01,
    # 100 topics — confirm against the training script.
    kroot = root+ "LDA-0.1-0.01-100/0/"
    # print_word_in_topic(root+"news.data",kroot+"model-final.phi", kroot+"wordmap.txt")
    print_topic_in_doc(kroot+"model-final.theta", kroot+"wordmap.txt", root+"news.data")

