# coding=utf-8
# Trial-process (judge process) entity extraction

import json
import sys
import re

sys.path.append("..")
sys.path.append("../../")

from i_entity_extractor.extractors.default.default_extractor import DefaultExtractor
from i_entity_extractor.common_parser_lib import toolsutil
import judge_process_conf
import copy


class JudgeProcessExtractor(DefaultExtractor):
    """Entity extractor for court trial-process (judge process) records.

    Normalizes raw extracted fields into entity data: resolves the province
    from the court name when missing and builds defendant / plaintiff /
    litigant name lists from either structured ``extra_data`` entries or
    raw delimiter-separated strings.
    """

    def __init__(self, topic_info, log):
        DefaultExtractor.__init__(self, topic_info, log)
        # Separators used to split raw litigant-name strings scraped from pages.
        self.litigant_seps = ['\r', '\n', '。', '，', '|', ' ', ';', ',']

    def entity_extract(self, parse_info, extract_data):
        '''Entry point for trial-process entity extraction.

        parse_info:   PageParseInfo thrift object (provides url / topic_id
                      for logging).
        extract_data: dict of raw extracted fields.
        Returns the formatted entity dict from format_extract_data().
        '''
        extract_info = parse_info.extract_info
        url = parse_info.base_info.url
        self.log.info("judege_wenshu_start_parse\turl:%s\ttopic_id:%s" % (url, extract_info.topic_id))

        entity_data = self.format_extract_data(extract_data)
        self.log.info("judege_wenshu_finish_parse\turl:%s\ttopic_id:%s" % (url, extract_info.topic_id))

        return entity_data

    def split_person(self, persons_str):
        '''Split a person-name string on ';' or ', ' and drop empty pieces.'''
        person_list = []
        for part in re.split(';|, ', persons_str.strip()):
            part = part.strip()
            if part:
                person_list.append(part)
        return person_list

    def format_extract_data(self, extract_data):
        '''Normalize raw extract_data into the entity-data dict.

        - Fills "province" via the court-name lookup when absent.
        - When structured "extra_data" person entries exist, classifies each
          name as defendant or plaintiff by its identity_type keyword;
          otherwise falls back to splitting the raw list strings on
          self.litigant_seps.
        Returns a deep copy of extract_data with the derived fields set.
        '''
        entity_data = copy.deepcopy(extract_data)
        court = extract_data.get("court")
        province = extract_data.get("province")
        if not province:
            province = self.parser_tool.province_parser.get_province(court)
            entity_data["province"] = province

        self.log.info(extract_data)
        extra_data = extract_data.get("extra_data", [])
        if extra_data:
            defendant_list = []
            plaintiff_list = []
            for extra in extra_data:
                identity_type = extra.get("identity_type", "")
                name = extra.get("name", "")
                if unicode(identity_type) in judge_process_conf.defendant_keyword_list:
                    defendant_list.append(name)
                elif unicode(identity_type) in judge_process_conf.plaintiff_keyword_list:
                    plaintiff_list.append(name)
            # Deduplicate while preserving first-seen order; the previous
            # list(set(...)) gave a hash-dependent, nondeterministic ordering
            # for the joined "litigants" string.
            litigant_list = []
            seen = set()
            for name in defendant_list + plaintiff_list:
                if name not in seen:
                    seen.add(name)
                    litigant_list.append(name)

            entity_data["defendant_list"] = defendant_list
            entity_data["plaintiff_list"] = plaintiff_list
            entity_data["litigant_list"] = litigant_list
            entity_data["litigants"] = ','.join(litigant_list)
        else:
            # No structured person entries: split the raw string fields.
            entity_data["defendant_list"] = toolsutil.my_split(extract_data.get("defendant_list", ""), self.litigant_seps)
            entity_data["plaintiff_list"] = toolsutil.my_split(extract_data.get("plaintiff_list", ""), self.litigant_seps)
            entity_data["litigant_list"] = toolsutil.my_split(extract_data.get("litigant_list", ""), self.litigant_seps)
            entity_data["litigants"] = ','.join(entity_data["litigant_list"])

        return entity_data


if __name__ == '__main__':

    import pytoml
    import sys

    sys.path.append('../../')
    from conf import get_config
    from bdp.i_crawler.i_extractor.ttypes import BaseInfo, ExtractInfo, PageParseInfo

    with open('../../entity.toml', 'rb') as config:
        config = pytoml.load(config)
    conf = get_config(config)
    import common

    topic_id = 37
    from entity_extractor_route import EntityExtractorRoute

    route = EntityExtractorRoute()
    topic_info = route.all_topics.get(topic_id, None)
    obj = JudgeProcessExtractor(topic_info, common.log)

    import time

    begin_time = time.time()
    extract_data = {
            "case_id": "",
            "close_date": "",
            "court": "",
            "defendant_list": "fdis|fsd",
            "filing_date": "",
            "litigant_list": "凤凰大厦 史蒂夫",
            "plaintiff_list": "zhang,fdsf",
            "province": "湖北",
            "status": ""
        }

    src_url = "http://www.hkfy.gov.cn/plus/diy_court_view.php?id=13540"
    data = json.dumps(extract_data)
    extract_info = ExtractInfo(ex_status=2, extract_data=data, topic_id=37)
    base_info = BaseInfo(url=src_url)
    parser_info = PageParseInfo(base_info=base_info, extract_info=extract_info)
    entity_data = obj.entity_extract(parser_info, extract_data)
    entity_data = obj.after_extract(src_url,entity_data,extract_data)
    for key, value in entity_data.items():
        if isinstance(value, list):
            for i in value:
                print key, ":", i
        elif isinstance(value, dict):
            for key2, value2 in value.items():
                print key2, ":", value2
        else:
            print key, ":", value
    print "time_cost:", time.time() - begin_time
