import json
import re
import sys

import jieba
from py2neo import Graph
from pypinyin import lazy_pinyin


class ExamInfoExtractor:
    """Extract adult-college-entrance-exam registration fields from free text
    and normalize each one against a Neo4j knowledge graph.

    Matching is two-stage for every field: exact pinyin lookup first, then a
    minimal-Levenshtein-distance fallback (requires the APOC plugin).
    """

    def __init__(self):
        # Local Neo4j instance; credentials are deployment-specific.
        self.graph = Graph("http://localhost:7474", auth=("neo4j", "111111"))
        # Maps logical field names to the node labels used in the graph.
        self.field_mappings = {
            "exam_type": "reportingType",
            "outside_residency": "candidateResidence",
            "military_service": "MilitaryBase",
            "household_location": "Province",
            "candidate_feature": "candidateCharacteristic",
            "admission_category": "admissionCategory",
            "exam_level": "reportingLevel",
            "exam_subject": "reportingCategory",
            "study_location": "studyLocation",
            "study_mode": "learningForm",
            "exam_site": "ExamSite",  # registration-point node
            "ck_school": "volunteerInstitutionName",
            "ck_major": "Major"
        }

    def to_pinyin(self, text):
        """Convert Chinese text to lowercase pinyin; '未知'/'无' map to ''."""
        if text in ["未知", "无"]:
            return ""
        return ''.join(lazy_pinyin(text)).lower()

    def _two_stage_match(self, label, text, extra_where=""):
        """Shared two-stage fuzzy match; returns the node name or '未知'.

        Stage 1 matches on exact pinyin (optionally narrowed by
        ``extra_where``); stage 2 picks the node whose name has the smallest
        Levenshtein distance to *text*. Values are passed as Cypher
        parameters to avoid injection; only the label — which comes from
        internal code, never user input — is interpolated.
        """
        if not text or text == "未知":
            return "未知"

        # Stage 1: exact pinyin match.
        query = f"""
        MATCH (n:{label})
        WHERE n.pinyin = $pinyin {extra_where}
        RETURN n.name LIMIT 1
        """
        result = self.graph.run(query, pinyin=self.to_pinyin(text)).data()
        if result:
            return result[0]['n.name']

        # Stage 2: closest name by edit distance (requires APOC).
        query = f"""
        MATCH (n:{label})
        WITH $text AS input, n
        ORDER BY apoc.text.levenshteinDistance(input, n.name) ASC
        LIMIT 1
        RETURN n.name
        """
        result = self.graph.run(query, text=text).data()
        return result[0]['n.name'] if result else "未知"

    def fuzzy_match(self, label, text):
        """Two-stage fuzzy match; the pinyin stage is restricted to nodes
        with examType = 'adultCollegeEntranceExam' (as in the original)."""
        return self._two_stage_match(
            label, text,
            extra_where="AND n.examType = 'adultCollegeEntranceExam'")

    def fuzzy_match2(self, label, text):
        """Two-stage fuzzy match without the examType restriction."""
        return self._two_stage_match(label, text)

    def fuzzy_match_with_relationship(self, start_label, end_label, relationship_type, text, return_field="end"):
        """Fuzzy-match *text* against the end node of a
        (start)-[relationship]->(end) pattern.

        Args:
            start_label / end_label: node labels of the pattern.
            relationship_type: relationship type between them.
            text: user-supplied value to match.
            return_field: "end" returns the end node's name, anything else
                the start node's name.

        Returns:
            (matched_name, relationship_code); ("未知", "未知") if no match.
        """
        if not text or text == "未知":
            return "未知", "未知"

        name_key = 'end_name' if return_field == "end" else 'start_name'

        # Stage 1: exact pinyin match on the end node.
        query = f"""
        MATCH (s:{start_label})-[r:{relationship_type}]->(e:{end_label})
        WHERE e.pinyin = $pinyin 
        RETURN s.name AS start_name, e.name AS end_name, r.code AS code LIMIT 1
        """
        result = self.graph.run(query, pinyin=self.to_pinyin(text)).data()

        if not result:
            # Stage 2: closest end-node name by edit distance (requires APOC).
            query = f"""
            MATCH (s:{start_label})-[r:{relationship_type}]->(e:{end_label})
            WITH $text AS input, s, r, e
            ORDER BY apoc.text.levenshteinDistance(input, e.name) ASC
            LIMIT 1
            RETURN s.name AS start_name, e.name AS end_name, r.code AS code
            """
            result = self.graph.run(query, text=text).data()

        if result:
            return result[0][name_key], result[0]['code']
        return "未知", "未知"

    def parse_address(self, address):
        """Parse a household address into 'province/city/district', each
        component normalized against the graph ('未知' when unresolvable)."""
        seg = [w for w in jieba.cut(address) if len(w) > 1]

        hierarchy = {"province": "未知", "city": "未知", "district": "未知"}

        # Province: look among the first two tokens for a provincial suffix.
        for word in seg[:2]:
            if word.endswith(('省', '市', '自治区')):
                hierarchy['province'] = self.fuzzy_match("Province", word)
                seg.remove(word)
                break

        # City: same window after the province token was consumed.
        for word in seg[:2]:
            if word.endswith(('市', '盟')):
                hierarchy['city'] = self.fuzzy_match("City", word)
                seg.remove(word)
                break

        # District: first remaining token.
        if seg:
            hierarchy['district'] = self.fuzzy_match("County", seg[0])

        return f"{hierarchy['province']}/{hierarchy['city']}/{hierarchy['district']}"

    def extract_exam_info(self, text):
        """Extract all registration fields from *text*, normalize them via the
        knowledge graph, and return the result as a JSON string.

        Args:
            text: free-form Chinese registration description.

        Returns:
            JSON string (non-ASCII preserved); unmatched fields are '未知'.
        """
        # Normalize sentence breaks/whitespace to '，'; the trailing sentinel
        # guarantees the last "field是value，" pattern has text after it.
        text = (re.sub(r'[。\s]', '，', text) + "pipei").strip('，')

        # Regex per field: label, '是' or '：', then a lazily-matched value
        # terminated by '，'.
        patterns = {
            "exam_type": r"报考类型[是：](.+?)[，]",
            "outside_residency": r"居住地[：是](.+?)[，]",
            "military_service": r"服役地[：是](.+?)[，]",
            "household_location": r"户籍所在地[：是](.+?)[，]",
            "candidate_feature": r"考生特征[：是](.+?)[，]",
            "admission_category": r"招生类别[：是](.+?)[，]",
            "exam_level": r"报考层次[：是](.+?)[，]",
            "exam_subject": r"报考科类[：是](.+?)[，]",
            "study_location": r"学习地点[：是](.+?)[，]",
            "study_mode": r"学习形式[：是](.+?)[，]",
            "exam_site_name": r"报名点[：是](.+?)[，]",
            "postal_code": r"邮编[：是](\d{6})[，]",
            "ck_school": r"志愿院校[：是](.+?)[，]",
            "ck_major": r"意向专业[：是](.+?)[，]",
            "emergency_contact": r"紧急电话[：是](\d{11})[，。]"
        }

        # Run each regex once (the original searched twice per field).
        extracted = {}
        for key, pattern in patterns.items():
            match = re.search(pattern, text)
            extracted[key] = match.group(1) if match else "未知"

        # Enrich the registration point with code/address/id from the graph.
        if extracted["exam_site_name"] != "未知":
            site_name = self.fuzzy_match("registrationPointName", extracted["exam_site_name"])
            if site_name != "未知":
                site_query = """
                MATCH (s:registrationPointName)
                WHERE s.name = $name
                RETURN s.code as code, s.address as address, ID(s) as site_id,s.name as name
                LIMIT 1
                """
                site_data = self.graph.run(site_query, name=site_name).data()
                if site_data:
                    extracted.update({
                        "exam_site_code": site_data[0]['code'],
                        "exam_site_address": site_data[0]['address'],
                        "exam_site_id": site_data[0]['site_id'],
                        "exam_site_name": site_data[0]['name']
                    })

        # Resolve the volunteer major ONCE — the original issued the same
        # two-query match four times for the name/code pair.
        ck_major_name, ck_major_code = self.fuzzy_match_with_relationship(
            "volunteerInstitutionName", "Major", "HAS_MAJOR", extracted["ck_major"])

        processed = {
            "exam_type": self.fuzzy_match(self.field_mappings["exam_type"], extracted["exam_type"]),
            "outside_residency": self.fuzzy_match("County", extracted["outside_residency"]),
            "military_service": self.fuzzy_match("County", extracted["military_service"]),
            "household_location": self.parse_address(extracted["household_location"]),
            "candidate_feature": self.fuzzy_match("candidateCharacteristic", extracted["candidate_feature"]),
            "admission_category": self.fuzzy_match("admissionCategory", extracted["admission_category"]),
            "exam_level": self.fuzzy_match("reportingLevel", extracted["exam_level"]),
            "exam_subject": [self.fuzzy_match("reportingCategory", extracted["exam_subject"])],
            "study_location": self.fuzzy_match("studyLocation", extracted["study_location"]),
            "study_mode": self.fuzzy_match("learningForm", extracted["study_mode"]),
            "exam_site_code": extracted.get("exam_site_code", "未知"),
            "exam_site_name": extracted["exam_site_name"],
            "exam_site_address": extracted.get("exam_site_address", "未知"),
            "postal_code": extracted["postal_code"],
            "emergency_contact": extracted["emergency_contact"],
            "ck_school": self.fuzzy_match("volunteerInstitutionName", extracted["ck_school"]),
            "ck_major": ck_major_name or "未知",
            "ck_major_code": ck_major_code or "未知"
        }

        return json.dumps(processed, ensure_ascii=False)


# Usage example: pass the registration text as the first CLI argument; when
# run with no argument, fall back to the built-in sample (the original
# crashed with IndexError and left sample_text unused).
if __name__ == "__main__":
    sample_text = """
    报考类型是区内户籍的报考，居住地是清水的河县，户籍所在地是内蒙古自治区呼和浩特市赛罕区，服役地是赛罕的区，
    考生特征是扶贫，招生类别是统一的考试，报考层次是专的升本，报考科类是理的工类，
    学习地点是虎男，学习形式是业的余，报名点是新城的区，邮编是310000，
    志愿院校是四川农业的大学，意向专业是计算机科学与的技术，紧急电话是13812345678，
    """
    input_text = sys.argv[1] if len(sys.argv) > 1 else sample_text

    extractor = ExamInfoExtractor()
    result = extractor.extract_exam_info(input_text)
    print(result)