# coding=utf8
import sys
import json
import time
sys.path.append('../../')
from i_entity_extractor.extractors.default.default_extractor import DefaultExtractor
import copy


class JudgeWenshuExtractor(DefaultExtractor):
    """Entity extractor for court judgement documents (judgement wenshu).

    Takes the raw fields scraped from a judgement page and normalizes them
    into a full entity record, filling missing fields (case id, case cause,
    court, province, case type, procedure, ...) by parsing the document body.
    """

    def __init__(self, topic_info, log):
        DefaultExtractor.__init__(self, topic_info, log)
        # Numeric case-type code (as string) -> human-readable case-type name.
        self.case_type_map = {"1": u"刑事案件", "2": u"民事案件", "3": u"行政案件", "4": u"赔偿案件", "5": u"执行案件"}
        # Marker character that may appear inside a case id -> case-type name.
        self.case_id_type_map = {u"刑": u"刑事案件", u"民": u"民事案件", u"商": u"民事案件", u"行": u"行政案件", u"赔": u"赔偿案件", u"执": u"执行案件"}

    def entity_extract(self, parse_info, extract_data):
        """Entry point: parse one judgement page into an entity record.

        parse_info   -- PageParseInfo thrift object (url + extract info).
        extract_data -- dict of raw scraped fields.
        Returns the normalized entity dict from format_extract_data().
        """
        extract_info = parse_info.extract_info
        url = parse_info.base_info.url
        self.log.info("judege_wenshu_start_parse\turl:%s\ttopic_id:%s" % (url, extract_info.topic_id))

        entity_data = self.format_extract_data(extract_data)
        self.log.info("judege_wenshu_finish_parse\turl:%s\ttopic_id:%s" % (url, extract_info.topic_id))

        return entity_data

    def format_extract_data(self, extract_data):
        """Normalize raw scraped fields, deriving missing ones from the body.

        Returns a deep copy of extract_data with the derived/normalized
        fields overwritten.
        """
        entity_data   = copy.deepcopy(extract_data)
        case_date     = extract_data.get("case_date")
        doc_content   = extract_data.get('doc_content')
        litigants     = extract_data.get('litigants')
        court         = extract_data.get("court")
        province      = extract_data.get("province")
        case_cause    = extract_data.get("case_cause")
        case_type     = extract_data.get("case_type")
        case_id       = extract_data.get("case_id")
        procedure     = extract_data.get("procedure")
        case_name     = extract_data.get("case_name")

        # Parse the document body (detail page) when it is non-empty.
        tmp_entity_data = {}
        company_list = None
        ref_ids      = None
        if doc_content and u'文档内容为空' not in unicode(doc_content):
            tmp_entity_data = self.parser_tool.wenshu_parser.do_parser(doc_content, litigants)
            if case_id is None:
                case_id = self.parser_tool.caseid_parser.get_case_id(doc_content)
            # All case ids referenced anywhere in the body.
            ref_ids = self.parser_tool.caseid_parser.get_case_ids(doc_content)

            if case_cause is None:
                case_cause = self.get_case_cause(doc_content)

        #company_list = self.parser_tool.company_parser.get_company_list(doc_content, wenshu_conf.company_end_list)
        # Fall back to values parsed from the body for fields the page lacked.
        if case_date is None:
            case_date = tmp_entity_data.get("case_date")
        if court is None:
            court = tmp_entity_data.get("court")
        if court is not None and province is None:
            province = self.parser_tool.province_parser.get_province(court)

        if case_type is not None:
            # Map a numeric code to its name; otherwise just strip spaces.
            # NOTE(review): str() on a non-ASCII unicode case_type would raise
            # UnicodeEncodeError on py2 -- upstream appears to send codes/"".
            if str(case_type) in self.case_type_map:
                case_type = self.case_type_map[str(case_type)]
            else:
                case_type = str(case_type).replace(' ', '')
        elif case_id is not None:
            # Infer the type from marker characters in the case id.  No
            # break on purpose: if several markers match, the last one in
            # dict iteration order wins (same as the original behavior).
            for marker, type_name in self.case_id_type_map.items():
                if marker in case_id:
                    case_type = type_name

        # NOTE(review): this unconditionally replaces the scraped litigants
        # with the parsed ones (None when the body was empty) -- confirm
        # that is intended.
        litigants = tmp_entity_data.get("litigants")

        if procedure is None:
            # Try the title first, then fall back to the parsed body.
            case_name = case_name if case_name else ""
            procedure = self.get_procedure_from_title(case_name)
            if procedure == "":
                procedure = tmp_entity_data.get("procedure")

        entity_data["max_money"]          = tmp_entity_data.get("max_money")
        entity_data["doc_content"]        = doc_content
        entity_data["ref_ids"]            = ref_ids
        entity_data["all_money"]          = tmp_entity_data.get("all_money")
        entity_data["company_list"]       = company_list
        entity_data["judge_content"]      = tmp_entity_data.get("judge_content")
        entity_data["case_cause"]         = case_cause
        entity_data["province"]           = province
        entity_data["plaintiff_list"]     = tmp_entity_data.get("plaintiff_list")
        entity_data["defendant_list"]     = tmp_entity_data.get("defendant_list")
        entity_data["litigants"]          = litigants
        entity_data["litigant_list"]      = tmp_entity_data.get("litigant_list")
        entity_data["litigant_info_list"] = tmp_entity_data.get("litigant_info_list")
        entity_data["judiciary_list"]     = tmp_entity_data.get("judiciary_list")
        entity_data["chain_case_id"]      = tmp_entity_data.get("chain_case_id")
        entity_data["court"]              = court
        entity_data["case_type"]          = case_type
        entity_data["procedure"]          = procedure
        entity_data["case_id"]            = case_id
        entity_data["case_date"]          = case_date
        entity_data["case_name"]          = case_name

        return entity_data

    def get_case_cause(self, content):
        """Return the first case cause found in content, or '' if none."""
        case_causes = self.parser_tool.case_cause_parser.get_case_causes(content)
        return case_causes[0] if case_causes else ''

    def get_procedure_from_title(self, title):
        """Extract the trial procedure from a document title.

        Checks, in order of priority: first instance, second instance,
        retrial, sentence modification/commutation.  Returns u"" when no
        marker is present.
        """
        title = unicode(title)
        if u'一审' in title:
            return u'一审'
        if u'二审' in title:
            return u'二审'
        if u'再审' in title:
            return u'再审'
        if u'刑罚变更' in title or u'减刑' in title:
            return u'刑罚变更'
        return u""





if __name__ == '__main__':
    # Manual smoke-test harness: pulls judgement records from MongoDB and
    # runs them through JudgeWenshuExtractor, printing the parsed entities.
    # Python 2 script (print statements); not part of the library API.
    import pytoml
    import sys

    sys.path.append('../../')
    from conf import get_config
    from bdp.i_crawler.i_extractor.ttypes import BaseInfo, CrawlInfo, ExtractInfo, PageParseInfo

    # Load the project-level extractor configuration.
    with open('../../entity.toml', 'rb') as config:
        config = pytoml.load(config)
    conf = get_config(config)
    import common

    topic_id = 32
    from entity_extractor_route import EntityExtractorRoute
    from i_entity_extractor.common_parser_lib.mongo import MongDb
    import json
    import traceback

    # Resolve the topic config for this extractor and build the extractor.
    route = EntityExtractorRoute()
    topic_info = route.all_topics.get(topic_id, None)
    begin_time = time.time()
    obj = JudgeWenshuExtractor(topic_info, common.log)

    # NOTE(review): hard-coded test DB endpoint and credentials -- presumably
    # a read-only staging instance; confirm before reuse.
    mongo_conf = {
        'host': '172.16.215.2',
        'port': 40042,
        'final_db': 'final_data',
        'username': "readme",
        'password': "readme",
    }
    db = MongDb(mongo_conf['host'], mongo_conf['port'], mongo_conf['final_db'],
                mongo_conf['username'],
                mongo_conf['password'])

    cursor = db.db["judgement_wenshu"].find({})
    num = 0
    for item in cursor:
        try:
            num += 1
            # Only the source url is taken from the fetched record; the
            # extract payload below is a fixed fixture, not the DB document.
            src_url = item.get("_src")[0]['url']
            extract_data = {
                "datas": [
                    {
                        "_site_record_id": "（2016）冀1023民初379号",
                        "bulletin_date": "",
                        "case_date": "",
                        "case_id": "（2016）冀1023民初379号",
                        "case_name": "",
                        "case_type": "",
                        "court": "",
                        "doc_content": "",
                        "doc_id": "57679d8b-8f24-4eb1-8b58-5521a3b51419",
                        "litigants": "",
                        "procedure": ""
                    },
                    {
                        "_site_record_id": "（2016）冀1023民初379号",
                        "bulletin_date": "",
                        "case_date": "",
                        "case_id": "（2016）冀1023民初379号",
                        "case_name": "",
                        "case_type": "",
                        "court": "",
                        "doc_content": "",
                        "doc_id": "57679d8b-8f24-4eb1-8b58-5521a3b51455",
                        "litigants": "",
                        "procedure": ""
                    }
                ]
            }

            # Wrap the fixture in the thrift objects the extractor expects.
            data = json.dumps(extract_data)
            base_info = BaseInfo(
                url="http://wenshu.court.gov.cn/content/content?DocID=57679d8b-8f24-4eb1-8b58-5521a3b51419", site_id=1)
            extract_info = ExtractInfo(ex_status=2, extract_data=data)
            parser_info = PageParseInfo(base_info=base_info, extract_info=extract_info)
            entity_data = obj.entity_extract(parser_info, extract_data)
            entity_data = obj.after_extract(src_url, entity_data, extract_data)

            # Dump the resulting entity, one key per line (lists/dicts expanded).
            print "-----------------------------"
            for key, value in entity_data.items():
                if isinstance(value, list):
                    for i in value:
                        print key, ":", i
                elif isinstance(value, dict):
                    for key2, value2 in value.items():
                        print key2, ":", value2
                else:
                    print key, ":", value

            # Stop after the first 10 records.
            if num %10 == 0:
                break
        except Exception as e:
            # Best-effort harness: report the failure and keep going.
            print traceback.format_exc()


    print "time_cost:",time.time() - begin_time
