# -*- coding: utf-8 -*-

import re, random, math, numpy
import codecs, shelve 
import logging

from django.conf import settings

from financial_daily.utils import WordSegUtils 

logger = logging.getLogger(__name__)

class NewsSummarizer:
    '''
    Extracts summary keywords and gist sentences from a news article.

    Refactoring of news_summarizer.py.  Instances must be used as a context
    manager so the shelve-backed frequency tables are opened and closed
    deterministically:

        with NewsSummarizer() as summarizer:
            keywords = summarizer.get_keywords(title, content)
    '''

    # Generic stop words.
    # NOTE(review): these sets are class-level and therefore shared by all
    # instances; loading is idempotent (same files each time) so this is
    # harmless, but instance attributes would be cleaner.
    stop_words_set = set()
    # Hand-curated financial stop words (excluded from keyword extraction).
    finance_stopwords_set = set()
    # Convergence threshold for the LexRank power iteration.
    lex_rank_epsilon = float(settings.NEWS_SUMMARIZER_PARAMS['lex_rank_epsilon'])
    # Damping factor of the LexRank random walk.
    lex_rank_damping_factor = float(settings.NEWS_SUMMARIZER_PARAMS['lex_rank_damping_factor'])
    # Document-frequency threshold used to filter terms.
    df_threshold = float(settings.NEWS_SUMMARIZER_PARAMS['df_threshold'])
    # Maximum number of keywords returned by get_keywords().
    max_keywords_returned = int(settings.NEWS_SUMMARIZER_PARAMS['max_keywords_returned'])
    # Maximum number of sentences returned by get_gist_sentences().
    max_sentences_returned = int(settings.NEWS_SUMMARIZER_PARAMS['max_sentences_returned'])
    # Minimum length a sentence must exceed to be considered at all.
    sent_cutoff = int(settings.NEWS_SUMMARIZER_PARAMS['sentence_cutoff'])
    # Delimiters splitting text into short clauses.
    clause_delimiter = re.compile(
            settings.NEWS_SUMMARIZER_DELIMITERS['clause_delimiter'].decode(settings.DEFAULT_ENCODING))
    # Delimiters splitting text into full sentences.
    sentence_delimiter = re.compile(
            settings.NEWS_SUMMARIZER_DELIMITERS['sentence_delimiter'].decode(settings.DEFAULT_ENCODING))
    # Numeric literal, e.g. "12", "3.14", ".5".
    # BUGFIX: the dot was unescaped (r'[\d]*.?\d+') and matched any
    # character, so strings such as "a1" were misclassified as numbers.
    number_regex = re.compile(r'\d*\.?\d+')

    def __init__(self):
        # Load the stop-word tables into the (class-level) sets.
        self.__load_stop_words()
        self.__load_finance_words()

        # External term-level processors (see add_term_processor()).
        self.term_processor_list = []

        # External sentence-level processors (see add_sent_processor()).
        self.sent_processor_list = []

    def __enter__(self):
        # Document-frequency table (encoded term -> DF), shelve-backed.
        self.DF_table = shelve.open(settings.NEWS_SUMMARIZER_DATA_FILES['doc_freq_file'])
        # Hand-picked keyword frequency table (encoded term -> KF).
        self.KF_table = shelve.open(settings.NEWS_SUMMARIZER_DATA_FILES['keyword_freq_file'])
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Release the shelve handles even when the managed body raised.
        self.DF_table.close()
        self.KF_table.close()

    def __load_stop_words(self):
        '''Loads the generic stop-word file; '#'-prefixed lines are comments.'''
        stopwords_file_name = settings.NEWS_SUMMARIZER_DATA_FILES['stop_words_file']

        with codecs.open(stopwords_file_name, mode='r',
                encoding=settings.DEFAULT_ENCODING) as stopwords_file:
            for line in stopwords_file:
                if line.startswith('#'):
                    continue
                stopword = line.strip()
                if len(stopword) > 0:
                    self.stop_words_set.add(stopword)

    def __load_finance_words(self):
        '''Loads the financial stop-word file; '#'-prefixed lines are comments.'''
        finance_stopwords_file_name = settings.NEWS_SUMMARIZER_DATA_FILES['financial_stopwords_file']
        with codecs.open(finance_stopwords_file_name, mode='r',
                encoding=settings.DEFAULT_ENCODING) as finance_stopwords_file:
            for line in finance_stopwords_file:
                if line.startswith('#'):
                    continue
                term = line.strip()
                # Consistency with __load_stop_words(): skip blank lines
                # instead of adding the empty string to the set.
                if len(term) > 0:
                    self.finance_stopwords_set.add(term)

    def get_sentences(self, doc_title, doc_content):
        '''Splits the article into sentences, feeding every term to the
        registered sentence-level processors.  Returns the sentence list.'''
        return self.__break_sentences(doc_title, doc_content)

    def get_gist_sentences(self, doc_title, doc_content):
        '''
        Returns up to max_sentences_returned summary sentences, chosen by
        LexRank over the sentence cosine-similarity graph and ordered by
        their position of appearance in the article.
        '''
        sentence_terms_list, sentence_list = self._export_sentences(doc_title, doc_content)
        if len(sentence_terms_list) <= self.max_sentences_returned:
            return sentence_list
        cos_table, table_size = self._calc_sent_cos_similarity(sentence_terms_list)
        sentence_mat = self.__calc_lex_rank(cos_table, table_size)
        # argsort() is ascending, so the last entries are the top-ranked ones.
        summary_sents_idx = [idx for idx in sentence_mat.argsort().tolist()[0][-self.max_sentences_returned:]]
        summary_sents_idx.sort()  # order by position of appearance in the article
        summary_sents = [sentence_list[idx] for idx in summary_sents_idx]
        return summary_sents

    def get_keywords(self, doc_title, doc_content, use_kf = True, debug = False, num_words_returned=None):
        '''
        Extracts keywords by running LexRank over the term-connectivity
        graph, then rescoring each term by its document frequency (and,
        when use_kf is True, by the hand-picked keyword frequency).

        Returns at most num_words_returned keywords (defaults to
        max_keywords_returned), most important first.
        '''
        # Connected cliques of terms (window of 2, see _get_term_cliques()).
        term_cliques = self._get_term_cliques(doc_title, doc_content)

        conn_table, table_size, term_list = self._calc_term_connectivity(term_cliques)
        # ROBUSTNESS: an empty term graph would make __calc_lex_rank()
        # divide by zero when building the uniform start vector.
        if table_size == 0:
            return []
        keywords_mat = self.__calc_lex_rank(conn_table, table_size)

        if not use_kf:
            for idx in xrange(0, table_size, 1):
                key = term_list[idx][0].encode(settings.DEFAULT_ENCODING)
                if debug:
                    # BUGFIX: was self.logger, which does not exist; the
                    # module-level logger is the only one available.
                    logger.debug(key + '(OS:' + str(keywords_mat[0, idx]) + ', DG:' + str(len(term_list[idx][2])) + ', DF:' + str(self.DF_table[key]) + ')\n')
                # Penalize frequent terms by their document frequency.
                keywords_mat[0, idx] /= self.DF_table[key]
        else:
            for idx in xrange(0, table_size, 1):
                key = term_list[idx][0].encode(settings.DEFAULT_ENCODING)
                if debug:
                    # BUGFIX: was self.logger, which does not exist.
                    logger.debug(key + '(OS:' + str(keywords_mat[0,idx]))
                if self.KF_table.has_key(key):
                    # NOTE(review): if KF and DF are stored as ints this is
                    # Python 2 integer division — confirm the shelve values
                    # are floats.
                    keywords_mat[0, idx] *= self.KF_table[key] / self.DF_table[key]
                else:
                    # Terms never chosen as manual keywords are discarded.
                    keywords_mat[0, idx] = 0
                if debug:
                    logger.debug(', DG:' + str(len(term_list[idx][2])) \
                        + ', DF:' + str(self.DF_table[key]) \
                        + ', FS:' + str(keywords_mat[0, idx]) + ')\n')

        if num_words_returned == None:
            num_words_returned = self.max_keywords_returned
        keywords = [term_list[idx][0] for idx in keywords_mat.argsort().tolist()[0][-num_words_returned:]]
        keywords.reverse()  # put the most important keywords first
        return keywords

    def add_term_processor(self, external_processor):
        '''Registers a processor whose process(term) is called for every term
        seen by _get_term_cliques().'''
        self.term_processor_list.append(external_processor)

    def add_sent_processor(self, external_processor):
        '''Registers a processor whose process(term, sent_id) is called for
        every term seen by __break_sentences().'''
        self.sent_processor_list.append(external_processor)

    def __calc_lex_rank(self, trans_table, table_size):
        '''
        Power iteration of the (damped) LexRank stationary distribution.
        Returns a 1 x table_size numpy matrix of scores.
        '''
        trans_matrix = self.__normalize_table(trans_table, table_size)

        # Start from the uniform distribution.
        prev_p = numpy.mat((1 / float(table_size)) * numpy.ones(table_size))
        # Make the initial error large enough to enter the loop.
        pre_err = 1000.0
        damping_factor = self.lex_rank_damping_factor
        damp_m = numpy.mat((1-damping_factor) * numpy.ones(table_size))
        iter_count = 0
        while True:
            cur_p = prev_p * trans_matrix * damping_factor + damp_m
            p_diff = cur_p - prev_p
            cur_err = math.sqrt(p_diff * p_diff.T)
            if cur_err < self.lex_rank_epsilon:
                return cur_p
            elif iter_count > 100:
                # BUGFIX: was self.logger, which does not exist; also use
                # warning() instead of the deprecated warn().
                logger.warning('Warning: cyclic trans_matrix! Last error is:%f', pre_err)
                return prev_p
            else:
                iter_count += 1
                prev_p = cur_p
                pre_err = cur_err

    def __idf_vec_product(self, term_vector):
        '''L2 norm of a sentence term vector, each component weighted by the
        term's document frequency.'''
        vec_product = 0
        for cur_term in term_vector.items():
            vec_product += (cur_term[1] * \
                    self.DF_table[cur_term[0].encode(settings.DEFAULT_ENCODING)])**2
        return math.sqrt(vec_product)

    def __normalize_table(self, table, table_size):
        '''
        Expands the sparse upper-triangular similarity table into a dense
        symmetric matrix (zero diagonal) and row-normalizes it into a
        stochastic transition matrix.  All-zero rows are left untouched.
        '''
        cos_matrix = numpy.zeros((table_size, table_size))
        for i, j in table.keys():
            if i == j:
                # No self-transitions.
                cos_matrix[i, j] = 0.0
            else:
                cos_matrix[i, j] = cos_matrix[j, i] = table[i, j]

        # Row sums for normalization.
        row_sum = cos_matrix.sum(dtype=float, axis=1)

        # Hoisted the per-cell row_sum check out of an O(n^2) inner loop;
        # dividing the whole row at once is equivalent.
        for row in xrange(0, table_size, 1):
            if row_sum[row] > 0.0:
                cos_matrix[row] /= row_sum[row]

        return cos_matrix

    def __normalize_row_min_element(self, row, row_size, sent_cos_table, normalized_cos_table):
        '''
        Rescales row `row` of the upper-triangular similarity table so that
        the smallest similarity involving this sentence maps to 0, writing
        the rescaled values into normalized_cos_table.
        '''
        # The table only stores (i, j) with i <= j, so the values involving
        # `row` live in column `row` above the diagonal and in row `row`
        # to its right.
        min_ele = sent_cos_table[0, row]
        for i in xrange(0, row, 1):
            if sent_cos_table[i, row] < min_ele:
                min_ele = sent_cos_table[i, row]
        for j in xrange(row+1, row_size, 1):
            if sent_cos_table[row, j] < min_ele:
                min_ele = sent_cos_table[row, j]
        denominator = 1.0 - min_ele
        for j in xrange(row, row_size, 1):
            normalized_cos_table[row, j] = (sent_cos_table[row, j] - min_ele) / \
                                           denominator

    def _calc_sent_cos_similarity(self, sent_tf_list):
        '''
        Computes the pairwise cosine similarity between sentences
        (DF-weighted term-frequency vectors) and min-normalizes each row.

        Returns (normalized similarity table keyed by (i, j) with i <= j,
        number of sentences).
        '''
        sent_cos_table = {}
        normalized_cos_table = {}
        sent_idf_vec_product = []
        for sent in sent_tf_list:
            sent_idf_vec_product.append(self.__idf_vec_product(sent))

        sent_count = len(sent_tf_list)
        for i in xrange(0, sent_count, 1):
            for j in xrange(i, sent_count, 1):
                # DF-weighted dot product of sentences i and j.
                i_j_vec_product = 0
                for cur_term in sent_tf_list[i].items():
                    if sent_tf_list[j].has_key(cur_term[0]):
                        i_j_vec_product += cur_term[1] * sent_tf_list[j][cur_term[0]] * \
                            self.DF_table[cur_term[0].encode(settings.DEFAULT_ENCODING)]**2

                sent_cos_table[i,j] = i_j_vec_product / (sent_idf_vec_product[i] * \
                                        sent_idf_vec_product[j])
            # Rescale row i so its minimum similarity becomes 0.
            self.__normalize_row_min_element(i, sent_count, sent_cos_table, normalized_cos_table)

        return normalized_cos_table, sent_count

    def _calc_term_connectivity(self, term_clique_dict):
        '''
        Groups terms whose occurrences fall within a window of two tokens of
        each other into connected cliques.  The clique a term belongs to is
        the basis for its connectivity degree, i.e. its importance as a
        keyword.

        Returns (connectivity table keyed by (i, j) with i <= j, number of
        terms kept, list of (term, first position, position set) tuples).
        '''
        term_clique_list = []
        # Sort terms by their first position of appearance.
        for item in sorted(term_clique_dict.iteritems(), key=lambda x: x[1][0]):
            key = item[0]
            # Skip overly common terms.
            if self.DF_table[key.encode(settings.DEFAULT_ENCODING)] > self.df_threshold:
                continue
            term_clique_list.append((item[0], item[1][0], item[1][1]))

        term_connect_table = {}
        term_count = len(term_clique_list)
        for i in xrange(0, term_count, 1):
            for term_i in term_clique_list[i][2]:
                for j in xrange(i, term_count, 1):
                    j_cliques = term_clique_list[j][2]
                    # Two terms are connected when any of their occurrences
                    # are at most two token positions apart.
                    if term_i - 2 in j_cliques or \
                        term_i - 1 in j_cliques or \
                        term_i + 1 in j_cliques or \
                        term_i + 2 in j_cliques:
                        term_connect_table[i,j] = 1.0

        return term_connect_table, term_count, term_clique_list

    def _get_term_cliques(self, title, content, filterred_tokens = None):
        '''
        Segments title + content into terms and collects, for each rare term
        (DF below df_threshold), its first token position and the set of all
        its token positions:

            {term: (first position, set of positions)}

        Every term is also fed to the registered term-level processors.
        filterred_tokens (sic — name kept for caller compatibility), when
        given, is a compiled regex of additional terms to exclude.
        '''
        # ROBUSTNESS: previously returned None for missing content.
        if content == None:
            return {}
        # Token id, used to order terms in the final output.
        term_id = -1
        clauses = self.clause_delimiter.split(content.strip())
        # Prepend the title so its terms are considered too.
        clauses.insert(0, title)
        cur_term_clique = {}
        for clause in clauses:
            clause = clause.strip()
            if len(clause) > 0:
                seg_tokens = WordSegUtils.word_segment_phrase(clause)  # coarse-grained
                for token in seg_tokens:
                    term = token[settings.WORD_SEG_LEX_TEXT]
                    term_id += 1

                    # Hand the term to the external processors first.
                    for processor in self.term_processor_list:
                        processor.process(term)

                    # Drop single characters, numbers, stop words, filtered
                    # tokens and terms missing from the DF table.
                    if len(term) < 2 or \
                        self.number_regex.match(term) != None or \
                        term in self.stop_words_set or \
                        (filterred_tokens != None and filterred_tokens.search(term)) or \
                        not self.DF_table.has_key(term.encode(settings.DEFAULT_ENCODING)):
                        continue
                    if term in self.finance_stopwords_set:
                        continue
                    if self.DF_table[term.encode(settings.DEFAULT_ENCODING)] < self.df_threshold:
                        if cur_term_clique.has_key(term):
                            cur_term_clique[term][1].add(term_id)
                        else:
                            cur_term_clique[term] = (term_id, set([term_id]))
        return cur_term_clique

    def _export_sentences(self, title, content):
        '''
        Splits content into sentences longer than sent_cutoff and builds a
        term-frequency dict for each of them.

        Returns (list of TF dicts, list of the corresponding sentences);
        sentences whose every term was filtered out are dropped from both.
        '''
        # ROBUSTNESS: previously returned None for missing content, which
        # made get_gist_sentences() fail on tuple unpacking.
        if content == None:
            return [], []
        sents = self.sentence_delimiter.split(content.strip())
        sentence_terms_list = []
        sentence_list = []
        for sent in sents:
            sent = sent.strip()
            if len(sent) > self.sent_cutoff:
                cur_TF = {}  # term frequency for this sentence
                seg_tokens = WordSegUtils.word_segment_phrase(sent)  # coarse-grained
                for token in seg_tokens:
                    term = token[settings.WORD_SEG_LEX_TEXT]
                    # Drop single characters, numbers, stop words and terms
                    # missing from the DF table.
                    if len(term) < 2 or \
                        self.number_regex.match(term) != None or \
                        term in self.stop_words_set or \
                        not self.DF_table.has_key(term.encode(settings.DEFAULT_ENCODING)):
                        continue
                    if cur_TF.has_key(term):
                        cur_TF[term] += 1
                    else:
                        cur_TF[term] = 1

                # Keep only sentences that contributed at least one term.
                if len(cur_TF) > 0:
                    sentence_terms_list.append(cur_TF)
                    sentence_list.append(sent)

        return sentence_terms_list, sentence_list

    def __break_sentences(self, title, content):
        '''
        Splits content into sentences longer than sent_cutoff, feeding every
        term of every kept sentence to the registered sentence-level
        processors together with the sentence id.

        Returns the list of kept sentences.  The title is not processed here.
        '''
        # ROBUSTNESS: previously returned None for missing content.
        if content == None:
            return []
        sents = self.sentence_delimiter.split(content.strip())
        sent_id = 0
        sentence_list = []
        for sent in sents:
            sent = sent.strip()
            if len(sent) > self.sent_cutoff:
                seg_tokens = WordSegUtils.word_segment_phrase(sent)  # coarse-grained
                for token in seg_tokens:
                    term = token[settings.WORD_SEG_LEX_TEXT]

                    # Hand the term to the external sentence processors.
                    for processor in self.sent_processor_list:
                        processor.process(term, sent_id)

                sentence_list.append(sent)
                sent_id += 1

        return sentence_list
