# coding=utf8
import sys

sys.path.append('..')
from bdp.i_crawler.i_entity_extractor.ttypes import EntityExtractorInfo, EntitySource
import common_parser_lib.toolsutil as toolsutil
from common_parser_lib.parser_tool import parser_tool
from entity_extractor_route import entity_route_obj
import json
import traceback
import time


class EntityExtractor:
    """Entity-extraction dispatcher.

    Routes a parsed page to the extractor registered for its topic_id and
    wraps each extracted entity into a thrift EntityExtractorInfo record.
    """

    def __init__(self, conf):
        # conf: dict-like config; must provide 'log' (a logger) and
        # 'extract_state' (mapping of extraction state names to codes).
        self.conf = conf
        self.log  = conf['log']
        self.parser_tool = parser_tool    # shared parser utilities (module singleton)
        self.route = entity_route_obj     # topic_id -> extractor router (module singleton)

    def _topic_id_error(self, resp, topic_id, url):
        '''Mark resp as failed because of an invalid topic_id and return it.'''
        resp['CODE'] = -10000
        resp['MSG']  = 'topic_id error,topic_id:%s' % topic_id
        self.log.warning('topic_id_error,\ttopic_id:%s \turl:%s' % (topic_id, url))
        return resp

    def entity_extractor(self, parse_info):
        '''Main entry point for entity extraction.

        Args:
            parse_info: thrift object carrying .base_info (url/site fields)
                and .extract_info (ex_status, topic_id, extract_data).
        Returns:
            resp dict from toolsutil.result(); on success resp['LIST'] holds
            EntityExtractorInfo records, on failure resp['CODE'] is -10000
            and resp['MSG'] describes the error. resp['TOPIC_ID'] is always set.
        '''
        # 1 Unpack input
        begin_time   = time.time()
        base_info    = parse_info.base_info
        extract_info = parse_info.extract_info
        ex_status    = extract_info.ex_status
        topic_id     = extract_info.topic_id

        # 2 Initialise the response structure
        resp = toolsutil.result()

        # 3 Validate topic_id. A missing/-1/non-numeric topic_id yields an
        #   error resp instead of raising, so one bad record cannot kill the
        #   caller (previously int() could raise an uncaught ValueError).
        if topic_id and topic_id != -1:
            try:
                topic_id = int(topic_id)
            except (TypeError, ValueError):
                return self._topic_id_error(resp, topic_id, base_info.url)
        else:
            return self._topic_id_error(resp, topic_id, base_info.url)

        extract_data_len = len(extract_info.extract_data) if extract_info.extract_data else 0

        extractor = self.route.get_extractor(topic_id)

        self.log.info("start_entity_extract\turl:%s\textract_data:%s\ttopic_id:%s\tex_status:%s\tparser:%s" % (
            base_info.url, extract_data_len, topic_id, ex_status, extractor.__class__))

        if not extractor:
            resp['CODE'] = -10000
            resp['MSG'] = "extractor is None, topic_id:%s" % topic_id
            self.log.error("extractor is None, topic_id:%s" % topic_id)
        elif ex_status != int(self.conf['extract_state']['Extract_Success']) or extract_data_len == 0:
            # Upstream extraction failed or produced nothing to work on.
            resp['CODE'] = -10000
            resp['MSG'] = "extract_status fail or extract_data_len = 0"
        else:
            # 4 Pre-processing: let the extractor split the raw payload.
            extract_data_list = extractor.before_extract(extract_info)
            num_extract_data  = 0

            for extract_data in extract_data_list:
                # 5 Main extraction, one payload at a time; a failure on one
                #   item is recorded but does not abort the remaining items.
                num_extract_data += 1
                try:
                    entity_data = extractor.entity_extract(parse_info, extract_data)

                    # Normalise to a list so single-entity and multi-entity
                    # extractors share one post-processing path.
                    entities = entity_data if isinstance(entity_data, list) else [entity_data]
                    for entity in entities:
                        entity_extract_data = self.after_entity_extract(
                            extractor, base_info, entity, extract_data, topic_id)
                        if entity_extract_data:
                            resp["LIST"].append(entity_extract_data)

                    resp["MSG"] += " %s extract_data in extract_data_list parser success" % num_extract_data
                except Exception:
                    # Was a bare `except:`; narrowed so KeyboardInterrupt and
                    # SystemExit are no longer swallowed.
                    self.log.error("extract_error\tmsg:%s" % (traceback.format_exc()))
                    resp["MSG"] += " %s extract_data in extract_data_list error, ret:[%s] " % (
                    num_extract_data, traceback.format_exc())
                    resp["CODE"] = -10000

        end_time = time.time()
        self.log.info("finish_entity_extract\turl:%s\ttopic_id:%s\ttimecost:%.2f" % (base_info.url, topic_id, (end_time - begin_time) * 1000))
        resp['TOPIC_ID'] = topic_id
        return resp

    def after_entity_extract(self, extractor, base_info, entity_data, extract_data, topic_id):
        '''Post-processing: run the extractor's after_extract hook and wrap the
        result into a thrift EntityExtractorInfo.

        Returns the EntityExtractorInfo, or None when after_extract filtered
        the entity out (falsy result).
        '''
        entity_extract_data = None
        entity_data   = extractor.after_extract(base_info.url, entity_data, extract_data)
        if entity_data:
            entity_source = EntitySource(url=base_info.url, site_id=base_info.site_id, site=base_info.site)
            entity = json.dumps(entity_data)
            entity_extract_data = EntityExtractorInfo(entity_data=entity, entity_source=entity_source, topic_id=topic_id)
        return entity_extract_data



if __name__ == "__main__":

    import sys

    # Extend the module search path so sibling packages resolve when this
    # file is run directly as a script.
    for parent_dir in ('../', '../../', '../../../'):
        sys.path.append(parent_dir)

    # Sample fixtures for manual testing. The entity_data values are raw
    # UTF-8 byte escapes (Python 2-era encoding of Chinese court names).
    extract_data = {
        "court": "山东省高级人民法院",
    }
    entity_data = {
        'province': '\xe5\xb1\xb1\xe4\xb8\x9c',
        'court': '\xe5\xb1\xb1\xe4\xb8\x9c\xe7\x9c\x81\xe9\xab\x98\xe7\xba\xa7\xe4\xba\xba\xe6\xb0\x91\xe6\xb3\x95\xe9\x99\xa2',
        'Param': '\xe8\xa3\x81\xe5\x88\xa4\xe6\x97\xa5\xe6\x9c\x9f:2016-10-25 TO 2016-10-25,\xe6\xb3\x95\xe9\x99\xa2\xe5\x9c\xb0\xe5\x9f\x9f:\xe5\xb1\xb1\xe4\xb8\x9c\xe7\x9c\x81,\xe4\xb8\xad\xe7\xba\xa7\xe6\xb3\x95\xe9\x99\xa2:\xe5\xb1\xb1\xe4\xb8\x9c\xe7\x9c\x81\xe9\xab\x98\xe7\xba\xa7\xe4\xba\xba\xe6\xb0\x91\xe6\xb3\x95\xe9\x99\xa2',
    }



