#!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import os
import string
import operator
# the three main analysis classes
import assign
import levels
import mode

"""
    simple sentence extraction according to mode.assign analysis and
    mode analysis results.
"""

class Extract:
    """Score sentences (documents) from hLDA result files and extract the top-K.

    The three input files are the standard hlda-c outputs: mode.assign
    (document-to-path assignments), mode.levels (per-document word levels)
    and mode (per-topic statistics).
    """

    # default paths for the three hLDA result files
    assign_path = ""
    levels_path = ""
    mode_path = ""

    # depth of the hLDA topic tree
    tree_depth = 3
    output = sys.stdout

    # per-document score, keyed by document id (stringified line number)
    docs_score = {}
    # number of top-frequency words per topic node used for scoring
    feature_dim = 100

    def __init__(self, tree_depth=3, output=sys.stdout, feature_dim=100):
        self.tree_depth = tree_depth
        self.output = output
        self.feature_dim = feature_dim
        # fix: use a per-instance dict — the class-level docs_score would be
        # shared (and polluted) across all Extract instances
        self.docs_score = {}

    def read_files(self, assign_path, levels_path, mode_path):
        """Check that all hLDA result files needed for scoring exist and remember their paths.

        Exits the process with status 1 if any of the three files is missing.
        """
        print("now, check all files necessary for the sentence extraction", file=sys.stdout)
        checks = (
            ("<1>. check the [mode.assign] file...", assign_path),
            ("<2>. check the [mode.levels] file...", levels_path),
            ("<3>. check the [mode] file...", mode_path),
        )
        for banner, path in checks:
            print(banner, file=sys.stdout)
            if not os.path.exists(path):
                print("Error: %s doesn't exist! please check..." % path, file=sys.stderr)
                sys.exit(1)
        print("check done!")
        self.assign_path = assign_path
        self.levels_path = levels_path
        self.mode_path = mode_path

    def extract_sen(self):
        """Main entrance for sentence extraction.

        Every document starts at score 1.0; for each node on the document's
        tree path, each of the node's top-`feature_dim` words that the
        document contains (at the matching level) contributes
        word_freq/node_words * node_docs/total_docs * 1000 to the score.
        """
        c_assign = assign.ModeAssign(self.tree_depth, self.output)
        c_assign.read_files(self.assign_path)

        c_levels = levels.Levels(self.levels_path)
        c_mode = mode.Mode(self.mode_path)
        c_mode.sort_topic_freq()

        # initialize the base score for every document
        for doc_id in range(c_assign.docs_num):
            self.docs_score[str(doc_id)] = 1.0

        # accumulate sentence scores from the hLDA middle results
        for docs, _leaf_freq in c_assign.sorted_leaf_node:
            # format is "leaf_node:doc:doc:..." — [0] is the leaf-node id,
            # the remaining entries are the document ids on that leaf
            node_docs = docs.split(':')
            for doc in node_docs[1:]:
                # walk every node (topic) along this document's tree path
                if doc not in c_assign.docs_levels:
                    print("Error: %s cannot find in assign docs levels" % doc, file=sys.stderr)
                    continue
                doc_path = c_assign.docs_levels[doc]
                for node_level, node in enumerate(doc_path):
                    # fix: report the node actually missing (original printed the leaf node)
                    if node not in c_mode.topic:
                        print("Error: %s cann't find in mode" % node, file=sys.stderr)
                        continue

                    ndocs = c_mode.topic[node][2]
                    nwords = c_mode.topic[node][3]
                    # some middle-level nodes carry 0 words; force the
                    # denominator to 1 to avoid division by zero
                    if int(nwords) == 0:
                        nwords = "1"

                    # node weight: fraction of all documents assigned under this node
                    node_weight = float(ndocs) / float(c_assign.docs_num)

                    # match the node's top-frequency words against the document:
                    # each candidate is encoded as "word:level" and looked up in
                    # the document's word-level table
                    for index in range(self.feature_dim):
                        word, freq = c_mode.sorted_wd_cnt[node][index]
                        to_find = word + ":" + str(node_level)
                        if to_find in c_levels.doc_wd_level[doc]:
                            # relative weight of this hit within its node
                            hit_word_weight = float(freq) / float(nwords)
                            # document score = sum over hits of
                            # (word_freq / node_words) * (node_docs / total_docs), scaled
                            self.docs_score[doc] += hit_word_weight * node_weight * 1000

    def print_score_topK(self, K):
        """Print the ids and scores of the K highest-scored sentences."""
        # fix: slice instead of range(K) so K larger than the corpus
        # no longer raises IndexError
        ranked = sorted(self.docs_score.items(), key=operator.itemgetter(1), reverse=True)
        for doc_score in ranked[:K]:
            print("doc: %s\tscore: %f" % doc_score, file=sys.stdout)

    def print_sentence_topK(self, corpus_path, K):
        """Print the K highest-scored sentences, reading their text from corpus_path.

        corpus_path holds one sentence per line; the line number (0-based,
        as a string) is the sentence id matched against docs_score.
        """
        if not os.path.isfile(corpus_path):
            print("Error: %s may not be a validate file, please check it" % corpus_path)
            sys.exit(1)
        # map sentence id -> sentence text
        corpus = {}
        with open(corpus_path, 'r') as fr:
            for sen_no, line in enumerate(fr):
                corpus[str(sen_no)] = line
        ranked = sorted(self.docs_score.items(), key=operator.itemgetter(1), reverse=True)
        for doc_id, score in ranked[:K]:
            if doc_id not in corpus:
                # fix: original used a bare "% " conversion (TypeError) and then
                # fell through to a guaranteed KeyError — report and skip instead
                print("Error: cannot find %s in corpus file!" % doc_id, file=sys.stderr)
                continue
            print("%f\t %s " % (score, corpus[doc_id]), file=sys.stdout)



if __name__ == '__main__':

    parser = argparse.ArgumentParser(description='stupid sentence extraction for hlda-c result and multi-document summary')
    parser.add_argument('--assign', type=str, required=True, help='mode.assign file position')
    parser.add_argument('--levels', type=str, required=True, help='mode.levels file path')
    parser.add_argument('--mode', type=str, required=True, help='mode file path')
    parser.add_argument('--corpus', type=str, nargs='?', help='corpus file to contain origin sentence per line')
    parser.add_argument('--output', type=str, nargs='?', help='destination file for the analysis result')
    parser.add_argument('--depth', type=int, nargs='?', default=3, help='setting for sentence depth')
    parser.add_argument('--K', type=int, nargs='?', default=40, help='extract the top k topest score of sentence')
    parser.add_argument('--D', type=int, nargs='?', default=100, help='feature dimension when score for every sentence')
    args = parser.parse_args()

    # write the analysis result to the given file, or to stdout by default
    output = open(args.output, 'w') if args.output else sys.stdout

    try:
        extract = Extract(args.depth, output, args.D)
        extract.read_files(args.assign, args.levels, args.mode)
        extract.extract_sen()

        if args.corpus:
            extract.print_sentence_topK(args.corpus, args.K)
    finally:
        # fix: the original never closed the output file handle
        if output is not sys.stdout:
            output.close()

    sys.stdout.write("Extract done\n")