#!/usr/bin/python
from __future__ import print_function
import argparse
import os
import operator
import sys
import string

""" 
   Basic class to analysis the <mode.assign> file.
   to print the final sentence cluser, we also need
   another file, namely <RemoveStop.temp>.
"""

class ModeAssign:
    """Analyzer for an hlda-c <mode.assign> result file.

    Pair it with the corresponding <RemoveStop.temp> corpus file to print
    the final sentence clusters, grouped by leaf topic node and ordered by
    cluster size (largest first).
    """

    def __init__(self, tree_depth=3, output=sys.stdout):
        """Initialize per-instance state, the output stream and tree depth.

        State was previously held in mutable class attributes, which would
        have been shared across instances; it now lives on the instance.
        """
        self.assign_dir = ""
        self.remove_stop_dir = ""
        # {sentence_id (str): original sentence text, newline included}
        self.corpus = {}
        # {leaf_node: "leaf_node:doc1:doc2:..."} colon-joined member list
        self.assign = {}
        # sorted leaf nodes, format: [("node:doc1:doc2", member_count), ...]
        self.sorted_leaf_node = []
        self.leaf_node = []
        self.docs_num = 0
        # {doc_id: list of topic nodes along the document's tree path}
        self.docs_levels = {}
        self.tree_depth = tree_depth
        # {node: tree level index, stored as a string}
        self.nodes_levels = {}
        # destination stream for cluster listings (default: stdout)
        self.output = output
        print("setting tree_depth: %d" % tree_depth)

    def read_files(self, assign_dir):
        """Read the mode.assign file and build per-leaf document clusters.

        Exits the process with status 1 when the file does not exist.
        Each line is expected as: <doc_no> <score> <node_level0> ... <leaf>.
        """
        self.assign_dir = assign_dir
        if not os.path.isfile(self.assign_dir):
            sys.stdout.write("assign file not found!\n")
            sys.exit(1)
        sys.stdout.write("[%s] loaded!\n" % self.assign_dir)

        with open(self.assign_dir, 'r') as f:
            lines = f.readlines()
            self.docs_num = len(lines)
            for line in lines:
                words = line.strip().split(" ")
                doc_no = words[0]
                # words[1] is skipped (presumably a score column — TODO
                # confirm against hlda-c output format); words[2:] are the
                # topic nodes for every level, the last one being the leaf.
                leaf_node = words[-1]
                all_node = words[2:]
                self.docs_levels[doc_no] = all_node
                # record the tree level of each node the first time it appears
                for level, node in enumerate(all_node):
                    if node not in self.nodes_levels:
                        self.nodes_levels[node] = str(level)

                # accumulate the colon-joined member list for this leaf
                if leaf_node in self.assign:
                    self.assign[leaf_node] += ":" + doc_no
                else:
                    self.assign[leaf_node] = leaf_node + ":" + doc_no

            # temporary unsorted dict {"node:doc1:...": colon-part count}
            unsorted_node = {}
            for value in self.assign.values():
                unsorted_node[value] = len(value.split(':'))

            # sort by cluster size ascending, then reverse for largest-first
            # (kept as two steps to preserve the original tie ordering)
            self.sorted_leaf_node = sorted(unsorted_node.items(),
                                           key=operator.itemgetter(1))
            self.sorted_leaf_node.reverse()

    def print_assign_path(self, remove_stop_dir):
        """Print sentence path cluster results to self.output.

        Exits the process with status 1 when the corpus file does not exist.
        The corpus line number doubles as the document/sentence id.
        """
        self.remove_stop_dir = remove_stop_dir
        if not os.path.isfile(self.remove_stop_dir):
            sys.stdout.write("remove stop corpus file not found!\n")
            sys.exit(1)
        sys.stdout.write("loading [%s]\n" % self.remove_stop_dir)

        with open(self.remove_stop_dir, 'r') as f:
            for sen_no, line in enumerate(f):
                self.corpus[str(sen_no)] = line

        for key, _count in self.sorted_leaf_node:
            docs = key.split(':')
            # cluster size goes to stdout as a quick progress indicator,
            # regardless of where the full listing is written
            print('%d\t' % (len(docs) - 1), file=sys.stdout)
            print('+++++++++++++++++++++++++++++++++++++++++++++  %d   ++++++++++++++++++++++++++++++++++++++++++' % (len(docs) - 1), file=self.output)
            # docs[0] is the node name itself; the rest are sentence ids
            for doc in docs[1:]:
                if doc in self.corpus:
                    print("%s:\t%s" % (doc, self.corpus[doc]), file=self.output)
                else:
                    print("%s not found!" % doc, file=sys.stderr)

if __name__ == '__main__':
    # Command-line driver: parse arguments, run the analysis, and make sure
    # an opened output file is closed even if the analysis fails.
    parser = argparse.ArgumentParser(
        description='stupid path assign analysis for hlda-c result')
    parser.add_argument('--assign', type=str, required=True,
                        help='mode.assign file position')
    parser.add_argument('--corpus', type=str, required=True,
                        help='corpus file to contain origin sentence per line')
    parser.add_argument('--output', type=str, nargs='?',
                        help='destination file for the analysis result')
    parser.add_argument('--depth', type=int, nargs='?', default=3,
                        help='setting for sentence depth')
    args = parser.parse_args()

    # fall back to stdout when no output file is requested
    output = open(args.output, 'w') if args.output else sys.stdout

    try:
        mode_assign = ModeAssign(args.depth, output)
        mode_assign.read_files(args.assign)
        mode_assign.print_assign_path(args.corpus)
    finally:
        # close the destination file if we opened one (never close stdout)
        if output is not sys.stdout:
            output.close()
    sys.stdout.write("mode assign analysis done!\n")

