#!/usr/bin/env python
#encoding:utf8

''' Several approaches could be used to optimize this code:
1. use numpy to speed up the computation
2. an undirected weighted graph (UndirectWeightedGraph) is used here;
   a directed weighted graph may be more suitable
3. expand the stop-word and token-word lists

Several parameters can be tuned to affect the results:
1. span
2. weight: the edge weight could decay as the span grows
3. stopwords

Open questions:
1. how to extract keywords on-line (incrementally)
'''

import os
import pdb
import sys
from collections import defaultdict

class UndirectWeightedGraph:
    """Undirected weighted graph ranked with a TextRank (PageRank-style) iteration."""

    d = 0.85        # damping factor of the PageRank recurrence
    iteration = 10  # fixed number of sweeps used to approximate convergence

    def __init__(self):
        # node -> list of (start, end, weight) tuples; each undirected edge is
        # recorded once per endpoint so every node sees all incident edges.
        self.graph = defaultdict(list)

    def addEdge(self, start, end, weight):
        """Add an undirected weighted edge between `start` and `end`."""
        # use a tuple (start, end, weight) instead of an Edge object
        self.graph[start].append((start, end, weight))
        self.graph[end].append((end, start, weight))

    def rank(self):
        """Run the TextRank iteration and return {node: normalized score}.

        Scores are normalized so the highest-ranked node maps to exactly 1.0.
        An empty graph yields an empty mapping.
        """
        ws = defaultdict(float)
        outSum = defaultdict(float)

        wsdef = 1.0 / (len(self.graph) or 1.0)
        for node, edges in self.graph.items():
            ws[node] = wsdef
            outSum[node] = sum((e[2] for e in edges), 0.0)

        # iterate over a sorted key list so results are reproducible
        sorted_keys = sorted(self.graph.keys())
        for _ in range(self.iteration):  # range works on both Python 2 and 3
            for node in sorted_keys:
                s = 0.0
                for _, neighbor, weight in self.graph[node]:
                    # each neighbor contributes its score scaled by this
                    # edge's share of the neighbor's total outgoing weight
                    s += weight / outSum[neighbor] * ws[neighbor]
                ws[node] = (1 - self.d) + self.d * s

        if ws:
            # BUG FIX: the original seeded min/max from sys.float_info fields
            # and updated them with if/elif, so the smallest score could never
            # update max_rank (a single-node or all-equal graph normalized to
            # garbage). Compute both bounds directly instead.
            min_rank = min(ws.values())
            max_rank = max(ws.values())
            # normalizing: map scores into (0, 1], best node -> 1.0
            for node, w in ws.items():
                ws[node] = (w - min_rank / 10.0) / (max_rank - min_rank / 10.0)

        return ws

class KeywordExtractor(object):
    """Base class for keyword extractors.

    Holds the default stop-word set and the `extract_tags` interface that
    concrete subclasses must implement.
    """

    # default English stop words; subclasses copy this into self.stop_words
    # (BUG FIX: removed a duplicated "that" entry from the literal)
    STOP_WORDS = set((
        "the", "of", "is", "and", "to", "in", "that", "we", "for", "an", "are",
        "by", "be", "as", "on", "with", "can", "if", "from", "which", "you", "it",
        "this", "then", "at", "have", "all", "not", "one", "has", "or",
    ))

    def set_stop_words(self, stop_words_path):
        """Load extra stop words (one per line, UTF-8) into `self.stop_words`.

        Parameters:
            stop_words_path: path to a UTF-8 text file, one stop word per line.
        Raises:
            Exception: if the resolved path is not an existing file.
        """
        # BUG FIX: the original called an undefined `_get_abs_path` helper and
        # used `os` without importing it; resolve the path with os.path directly.
        abs_path = os.path.normpath(os.path.abspath(stop_words_path))
        if not os.path.isfile(abs_path):
            raise Exception("jieba: file does not exist: " + abs_path)
        # BUG FIX: close the file deterministically instead of leaking the handle
        with open(abs_path, 'rb') as f:
            content = f.read().decode('utf-8')
        for line in content.splitlines():
            self.stop_words.add(line)

    def extract_tags(self, *args, **kwargs):
        raise NotImplementedError


class TextRank(KeywordExtractor):
    """TextRank keyword extractor: builds a word co-occurrence graph inside a
    sliding window and ranks the words with UndirectWeightedGraph.rank()."""

    # sliding-window size: each kept word is linked to the following
    # span-1 kept words
    span = 5

    def __init__(self):
        self.stop_words = self.STOP_WORDS.copy()
        self.pos_filt = frozenset(('ns', 'n', 'vn', 'v'))

    def pairfilter(self, wp):
        """Keep a (word, flag) pair only if its POS tag is allowed, the word is
        at least 2 characters long, and it is not a stop word."""
        return (wp.flag in self.pos_filt and len(wp.word.strip()) >= 2
                and wp.word.lower() not in self.stop_words)

    def textrank(self, words, topK=20, withWeight=False, allowPOS=('ns', 'n', 'vn', 'v')):
        """
        Extract keywords from sentence using TextRank algorithm.
        Parameter:
            - words: should contain properties: (flag, word)
            - topK: return how many top keywords. `None` for all possible words.
            - withWeight: if True, return a list of (word, weight);
                          if False, return a list of words.
            - allowPOS: the allowed POS list eg. ['ns', 'n', 'vn', 'v'].
                        if the POS of w is not in this list, it will be filtered.
        """
        self.pos_filt = frozenset(allowPOS)
        g = UndirectWeightedGraph()
        cm = defaultdict(int)
        word_count = len(words)  # hoisted: invariant inside the loops
        for i, wp in enumerate(words):
            if not self.pairfilter(wp):
                continue
            # link wp to the next span-1 words; min() clamps at the end of
            # the sequence instead of an in-loop bounds check
            for j in range(i + 1, min(i + self.span, word_count)):
                if not self.pairfilter(words[j]):
                    continue
                cm[(wp.word, words[j].word)] += 1
        for (w1, w2), weight in cm.items():
            g.addEdge(w1, w2, weight)
        nodes_rank = g.rank()
        # BUG FIX: removed a stray pdb.set_trace() left over from debugging,
        # which halted every call in production.
        if withWeight:
            tags = sorted(nodes_rank.items(), key=lambda x: x[1], reverse=True)
        else:
            tags = sorted(nodes_rank, key=nodes_rank.__getitem__, reverse=True)
        # a falsy topK (None or 0) returns every ranked word
        return tags[:topK] if topK else tags

    extract_tags = textrank

def test():
    """Smoke test: extract keywords from a Chinese sentence tokenized and
    POS-tagged by jieba, then print the ranked words."""
    sentence = """将每个句子看成图中的一个节点，若两个句子之间有相似性，认为对应的两个节点之间有一个无向有权边，权值是相似度。"""
    import jieba
    import jieba.posseg
    words = jieba.posseg.cut(sentence)
    tr = TextRank()
    # textrank indexes into `words`, so materialize the generator first
    kw = tr.textrank(tuple(words), withWeight=True)
    for item in kw:
        # BUG FIX: the original used Python-2-only `print item` statements,
        # a SyntaxError on Python 3; print() works on both.
        if isinstance(item, tuple):
            print(item[0])
        else:
            print(item)


if __name__ == "__main__":
    test()
        

