# -*- coding: utf-8 -*-

import os, glob, codecs, re
import httplib, urllib
import logging
import json

from sets import Set

from django.conf import settings

from companies.models import QuestionPolarity
from financial_daily.utils import WordSegUtils

logger = logging.getLogger(__name__)

class PolarityCheck:
    '''
    Determines the polarity (positive/negative/neutral/...) of an input text.

    The text is word-segmented, each term is mapped to a list of feature
    tags via a feature-word table, and the flattened tag sequence is then
    reduced by a shift-reduce automaton driven by two state-transfer tables
    (an intermediate one and a final one) loaded from the data files
    configured in Django settings.
    '''

    def __init__(self):
        # Load the feature-word table (term -> feature tag list).
        self.__load_features_file()
        # Load the auxiliary-word tag set.
        self.__load_aux_tokens()
        # Load the degree-word tag set.
        self.__load_degree_tokens()
        # Load the automaton state-transfer tables.
        self.__load_intermediate_transfer_table()
        self.__load_final_transfer_table()
        # Clause/token delimiters used when splitting segmented text.
        self.clause_delimiter = re.compile(settings.NEWS_SUMMARIZER_DELIMITERS['clause_delimiter'].decode(settings.DEFAULT_ENCODING))
        self.token_delimiter = settings.NEWS_SUMMARIZER_DELIMITERS['token_delimiter']
        # Pending feature-tag sequence consumed by the automaton.
        self.token_list = []

    def __enter__(self):
        # Return self so "with PolarityCheck() as pc:" binds the instance
        # (previously this returned None, leaving the context variable unusable).
        print('PolarityCheck object initialized.')
        return self

    def __exit__(self, type, value, traceback):
        # Context-manager exit hook; nothing to release.
        print('PolarityCheck object destroyed.')

    def get_text_polarity(self, text_str, isdebug=False):
        '''
        Classify *text_str* and return a (polarity, token_info) pair.

        polarity is one of the QuestionPolarity constants; token_info is the
        value returned by the feature-sequence conversion (a debug string
        when *isdebug*, otherwise the list of segmented terms), or None when
        conversion itself failed.
        '''
        # Initialize up front: previously a failure inside the try-block left
        # token_info unbound and the return statement raised NameError.
        token_info = None
        try:
            token_info = self.__convert_text_to_feature_sequence(text_str, isdebug)
            result = self.__get_sequence_polarity(isdebug)[0]
        except Exception:
            # Degrade to UNKNOWN, but leave a trace instead of swallowing silently.
            logger.exception("Polarity check failed for input text.")
            result = QuestionPolarity.UNKNOWN
        else:
            # Map the automaton's final state name onto the model constant;
            # any unrecognized state degrades to UNKNOWN.
            result = {
                "POSITIVE": QuestionPolarity.POSITIVE,
                "NEGATIVE": QuestionPolarity.NEGATIVE,
                "NEUTRAL": QuestionPolarity.NEUTRAL,
                "UNCERTAIN": QuestionPolarity.UNCERTAIN,
                "UNKNOWN": QuestionPolarity.UNKNOWN,
            }.get(result, QuestionPolarity.UNKNOWN)

        return result, token_info

    def __load_features_file(self):
        '''Load the feature-word file: one "term<TAB>tag1<TAB>tag2..." per line.'''
        self.features_set = {}
        feature_file_name = settings.POLARITY_DATA_FILES['feature_file_name']
        with codecs.open(feature_file_name, mode='r',
                         encoding=settings.DEFAULT_ENCODING) as feature_file:
            for line in feature_file:
                features = line.strip().split('\t')
                # First occurrence of a term wins; later duplicates are ignored.
                if len(features) > 0 and features[0] not in self.features_set:
                    self.features_set[features[0]] = features[1:]

    def __load_aux_tokens(self):
        '''Load the auxiliary-word tag set (positive/negative/neutral aux tags).'''
        feature_tags = settings.POLARITY_FEATURE_TAGS
        # Builtin set() replaces the long-deprecated sets.Set.
        self.aux_tokens = set([feature_tags['AUX_POS'][0],
                               feature_tags['AUX_NEG'][0],
                               feature_tags['AUX_NEU'][0]])

    def __load_degree_tokens(self):
        '''Load the degree-word tag set (increase/decrease/unchanged tags).'''
        feature_tags = settings.POLARITY_FEATURE_TAGS
        self.degree_tokens = set([feature_tags['INCREASE'][0],
                                  feature_tags['DECREASE'][0],
                                  feature_tags['UNCHANGE'][0]])

    def __load_intermediate_transfer_table(self):
        '''Load the transfer table used during the shift-reduce pass.'''
        self.intermediate_status_transfer_dict = {}
        self.__load_transfer_table(
            settings.POLARITY_DATA_FILES['intermediate_transfer_table_file'],
            self.intermediate_status_transfer_dict)

    def __load_final_transfer_table(self):
        '''Load the transfer table used during the final reduction pass.'''
        self.final_status_transfer_dict = {}
        self.__load_transfer_table(
            settings.POLARITY_DATA_FILES['final_transfer_table_file'],
            self.final_status_transfer_dict)

    def __load_transfer_table(self, table_file_name, status_transfer_dict):
        '''
        Parse a transfer-table file into *status_transfer_dict* as
        {left_token: {right_token: (next_token, action)}}.

        Lines are "L<TAB>R<TAB>NEXT<TAB>ACTION"; blank lines and lines
        starting with '#' are skipped.  Raises Exception on a duplicated
        (L, R) entry.
        '''
        # "with" guarantees the file is closed; the previous try/finally
        # raised NameError on file_object when codecs.open itself failed.
        with codecs.open(table_file_name, mode='r',
                         encoding=settings.DEFAULT_ENCODING) as file_object:
            for line in file_object:
                line = line.strip()
                if len(line) == 0 or line.startswith('#'):
                    continue
                data = line.split('\t', 4)
                l_token = data[0]
                r_token = data[1]
                next_token = data[2]
                action = data[3]
                r_token_list = status_transfer_dict.setdefault(l_token, {})
                if r_token in r_token_list:
                    raise Exception("Duplicated token entry in status transfer table: %s->%s" % (l_token, r_token))
                r_token_list[r_token] = (next_token, action)

    # Derive the polarity from the state-transfer tables.
    def __get_sequence_polarity(self, isdebug=False):
        '''Run the automaton over self.token_list and return the final stack.'''
        if isdebug:
            print(self.token_list)
        final_state_stack = self.__process_intermediate_states(isdebug)
        if isdebug:
            print(final_state_stack)
        # A single remaining state is the answer; otherwise keep reducing
        # in place with the final transfer table.
        if len(final_state_stack) > 1:
            self.__process_final_states(final_state_stack, isdebug)
        return final_state_stack

    def __process_intermediate_states(self, isdebug=False):
        '''
        Shift-reduce loop over self.token_list using the intermediate
        transfer table.  Returns the working stack (possibly still holding
        more than one state).
        '''
        if len(self.token_list) == 0:
            return [QuestionPolarity.UNKNOWN]
        working_stack = []
        # Seed the stack with the START marker and the first input token.
        working_stack.append(settings.POLARITY_FEATURE_TAGS['START'])
        working_stack.append(self.__move_in_one())
        is_list_empty = False
        while True:
            if len(working_stack) < 2:
                break
            l_token = working_stack[-2]
            r_token = working_stack[-1]

            next_token, action = self.__get_action(l_token, r_token, self.intermediate_status_transfer_dict, isdebug)
            if action == settings.POLARITY_ACTIONS['replace_L_R']:
                # Reduce: replace L and R with next_token.
                self.__pop_LR_push(working_stack, next_token)
            elif action == settings.POLARITY_ACTIONS['del_L']:
                # Drop the left operand only; no shift.
                self.__pop_L_push(working_stack, None)
            else:
                # All remaining actions also shift the next input token.
                move_in_token = self.__move_in_one()
                if action == settings.POLARITY_ACTIONS['del_L_move']:
                    self.__pop_L_push(working_stack, move_in_token)
                elif action == settings.POLARITY_ACTIONS['del_R_move']:
                    self.__pop_R_push(working_stack, move_in_token)
                elif action == settings.POLARITY_ACTIONS['del_LR_move']:
                    self.__pop_LR_push(working_stack, move_in_token)
                elif action == settings.POLARITY_ACTIONS['replace_L_move']:
                    self.__replace_L_push(working_stack, next_token, move_in_token)
                elif action == settings.POLARITY_ACTIONS['replace_LR_move']:
                    self.__replace_LR_push(working_stack, next_token, move_in_token)
                elif action == settings.POLARITY_ACTIONS['clear_move']:
                    self.__clear_push(working_stack, move_in_token)
                elif action == settings.POLARITY_ACTIONS['clear_replace_quit']:
                    # Terminal action: collapse the stack and stop immediately.
                    self.__clear_push(working_stack, next_token)
                    break
                else:
                    # 'move' and any unrecognized action: plain shift.
                    self.__append_token(working_stack, move_in_token)

                # Stop after the iteration that consumed the TERMINAL marker.
                if is_list_empty:
                    break

                if move_in_token is None:
                    # Input exhausted: shift the TERMINAL marker and allow
                    # one more reduction pass before stopping.
                    working_stack.append(settings.POLARITY_FEATURE_TAGS['TERMINAL'])
                    is_list_empty = True
        return working_stack

    def __process_final_states(self, working_stack, isdebug=False):
        '''
        Reduce *working_stack* in place using the final transfer table.

        The scan proceeds left to right, since Chinese is right-headed /
        left-branching.  Raises Exception on a state pair the table has no
        entry for.
        '''
        head_idx = 0
        while True:
            working_stack_size = len(working_stack)
            if working_stack_size < 2:
                break

            # Wrap the scan position back to the start when it runs off the end.
            if head_idx + 1 >= working_stack_size:
                head_idx = 0

            l_token = working_stack[head_idx]
            r_token = working_stack[head_idx + 1]

            next_token, action = self.__get_action(l_token, r_token, self.final_status_transfer_dict, isdebug)
            if action == settings.POLARITY_ACTIONS['replace_L_R']:
                # Reduce the pair into next_token at the same position.
                self.__pop_head_LR_push(working_stack, next_token, head_idx)
            elif action == settings.POLARITY_ACTIONS['del_L']:
                self.__pop_head_L_push(working_stack, None, head_idx)
            elif action == settings.POLARITY_ACTIONS['del_LR']:
                self.__pop_head_LR_push(working_stack, None, head_idx)
            elif action == settings.POLARITY_ACTIONS['nop']:
                # No reduction possible here; advance the scan head.
                head_idx += 1
            else:
                raise Exception("Error state in final status processing! %s->%s" % (l_token, r_token))

    def __get_action(self, l_token, r_token, status_transfer_dict, isdebug):
        '''
        Look up the (next_token, action) pair for (l_token, r_token).

        Falls back to the row's DEFAULT column when r_token has no entry;
        returns (None, None) when l_token has no row at all.  Raises
        KeyError (after logging) when neither r_token nor DEFAULT exists.
        '''
        result = None, None
        if l_token in status_transfer_dict:
            r_token_list = status_transfer_dict[l_token]
            try:
                if r_token in r_token_list:
                    result = r_token_list[r_token]
                else:
                    result = r_token_list[settings.POLARITY_FEATURE_TAGS['DEFAULT']]
            except KeyError as ke:
                logger.error("Key %s,%s not found!", l_token, r_token)
                raise ke
        if isdebug:
            print("L:%s, R:%s, N:%s, A:%s" % (l_token, r_token, result[0], result[1]))
        return result

    def __move_in_one(self):
        '''Consume and return the next pending feature token, or None when empty.'''
        if len(self.token_list) == 0:
            return None
        return self.token_list.pop(0)  # pop the first token from the list

    def __pop_LR_push(self, stat_stack, new_token):
        # Remove the top two states, then push new_token (if not None).
        stat_stack.pop()  # right operand
        stat_stack.pop()  # left operand
        self.__append_token(stat_stack, new_token)

    def __pop_head_LR_push(self, stat_stack, new_token, position):
        # Remove the pair at *position*, then insert new_token there (if not None).
        stat_stack.pop(position)  # left operand
        stat_stack.pop(position)  # right operand (shifted into *position*)
        self.__prepend_token(stat_stack, new_token, position)

    def __pop_L_push(self, stat_stack, new_token):
        stat_stack.pop(-2)  # left operand
        self.__append_token(stat_stack, new_token)

    def __pop_head_L_push(self, stat_stack, new_token, position):
        stat_stack.pop(position)  # left operand
        self.__prepend_token(stat_stack, new_token, position)

    def __pop_R_push(self, stat_stack, new_token):
        stat_stack.pop()  # right operand
        self.__append_token(stat_stack, new_token)

    def __replace_L_push(self, stat_stack, new_token, movein_token):
        stat_stack[-2] = new_token  # replace the left operand
        self.__append_token(stat_stack, movein_token)

    def __replace_LR_push(self, stat_stack, new_token, movein_token):
        # Replace both operands with new_token, then shift.
        stat_stack[-1] = new_token
        stat_stack[-2] = new_token
        self.__append_token(stat_stack, movein_token)

    def __clear_push(self, stat_stack, new_token):
        # Keep only the top of the stack, then push new_token (if not None).
        del stat_stack[:-1]
        self.__append_token(stat_stack, new_token)

    def __append_token(self, stat_stack, token):
        if token is not None:
            stat_stack.append(token)

    def __prepend_token(self, stat_stack, token, position):
        if token is not None:
            stat_stack.insert(position, token)

    class Token:
        '''A segmented term together with its polarity feature tags.'''
        def __init__(self, term_text, feature_list):
            self.term_text = term_text
            self.feature_list = feature_list

    # Build the feature-tag sequence for the automaton.
    def __convert_text_to_feature_sequence(self, text_str, isdebug):
        '''
        Segment *text_str*, keep only terms present in the feature table and
        store their flattened feature tags in self.token_list.

        Returns a debug dump of tokens + segmentation when *isdebug*,
        otherwise the raw list of segmented terms.
        '''
        # Coarse-grained word segmentation.
        seg_terms = WordSegUtils.word_segment_phrase(text_str)
        tokens_list = []
        total_tokens_list = []
        for term in seg_terms:
            term_text = term['lexemeText']
            if term_text in self.features_set:
                tokens_list.append(self.Token(term_text, self.features_set[term_text]))
            if self.clause_delimiter.search(term_text):
                # Close the clause with a single token delimiter (never two
                # delimiters in a row, never a delimiter on an empty clause).
                if (len(tokens_list) > 0 and self.token_delimiter != tokens_list[-1].term_text):
                    tokens_list.append(self.Token(self.token_delimiter, self.features_set[self.token_delimiter]))
                total_tokens_list += tokens_list
                tokens_list = []
        # Add the tokens of the trailing (undelimited) clause.
        total_tokens_list += tokens_list
        # Flatten the per-token feature tags into the automaton input.
        self.token_list = [feature for token in total_tokens_list for feature in token.feature_list]

        if isdebug:
            if len(self.token_list) > 0:
                return ';'.join([token.term_text + ":" + ','.join(token.feature_list) for token in total_tokens_list]) + '\t' + '/'.join([term['lexemeText'] for term in seg_terms])
            else:
                return ''
        else:
            return [term['lexemeText'] for term in seg_terms]


