import json
import re
import sys

import jieba
from py2neo import Graph
from pypinyin import lazy_pinyin


class ExamInfoExtractor:
    """Extract adult-college-entrance-exam registration fields from free text
    and normalize each field against a Neo4j knowledge graph.

    The graph is expected to hold nodes labelled per ``field_mappings`` with
    ``name``/``pinyin`` properties, plus ``registrationPointName`` nodes and
    ``volunteerInstitutionName -[:HAS_MAJOR]-> Major`` relationships carrying
    a ``code`` property.
    """

    def __init__(self):
        # Local Neo4j instance that stores the exam knowledge graph.
        self.graph = Graph("http://localhost:7474", auth=("neo4j", "111111"))
        # Logical field name -> node label used in the graph.
        self.field_mappings = {
            "exam_type": "reportingType",
            "outside_residency": "candidateResidence",
            "military_service": "MilitaryBase",
            "household_location": "Province",
            "candidate_feature": "candidateCharacteristic",
            "admission_category": "admissionCategory",
            "exam_level": "reportingLevel",
            "exam_subject": "reportingCategory",
            "study_location": "studyLocation",
            "study_mode": "learningForm",
            "exam_site": "ExamSite",
            "ck_school": "volunteerInstitutionName",
            "ck_major": "Major"
        }

    def to_pinyin(self, text):
        """Convert Chinese text to lower-case pinyin with no separators.

        Returns "" for missing/unknown sentinels so the caller's exact
        pinyin lookup simply finds no match.
        """
        if text in ["未知", "无", None]:
            return ""
        return ''.join(lazy_pinyin(text)).lower()

    def fuzzy_match(self, label, text):
        """Two-stage fuzzy match of *text* against nodes labelled *label*.

        Stage 1: exact pinyin match restricted to the adult-exam subset.
        Stage 2: smallest Levenshtein distance on the raw text (APOC).
        Returns the matched node name, or "未知" when nothing matches.

        The label is code-controlled; user-supplied text is passed as a
        Cypher parameter to prevent query injection.
        """
        if not text or text == "未知":
            return "未知"

        pinyin = self.to_pinyin(text)
        exact_query = f"""
        MATCH (n:{label})
        WHERE n.pinyin = $pinyin AND n.examType = 'adultCollegeEntranceExam'
        RETURN n.name LIMIT 1
        """
        rows = self.graph.run(exact_query, pinyin=pinyin).data()
        if rows:
            return rows[0].get('n.name', '未知')

        distance_query = f"""
        MATCH (n:{label})
        WITH $input AS input, n
        ORDER BY apoc.text.levenshteinDistance(input, n.name) ASC
        LIMIT 1
        RETURN n.name
        """
        rows = self.graph.run(distance_query, input=text).data()
        return rows[0].get('n.name', '未知') if rows else "未知"

    def _pick_relationship_row(self, rows, return_field):
        """Extract a (name, code) pair from a relationship-match result."""
        row = rows[0]
        name_key = 'end_name' if return_field == "end" else 'start_name'
        return row.get(name_key, '未知'), row.get('code', '未知')

    def fuzzy_match_with_relationship(self, start_label, end_label, relationship_type, text, return_field="end"):
        """Fuzzy-match *text* against the end node of (start)-[rel]->(end).

        Returns a ``(name, code)`` tuple: *name* comes from the start or end
        node depending on *return_field*, *code* from the relationship's
        ``code`` property.  Falls back to Levenshtein distance when the exact
        pinyin lookup finds nothing; returns ("未知", "未知") on no match.
        """
        if not text or text == "未知":
            return "未知", "未知"

        # User text is bound as a Cypher parameter (injection-safe); labels
        # and the relationship type are code-controlled constants.
        pinyin = self.to_pinyin(text)
        exact_query = f"""
        MATCH (s:{start_label})-[r:{relationship_type}]->(e:{end_label})
        WHERE e.pinyin = $pinyin
        RETURN s.name AS start_name, e.name AS end_name, r.code AS code LIMIT 1
        """
        rows = self.graph.run(exact_query, pinyin=pinyin).data()
        if rows:
            return self._pick_relationship_row(rows, return_field)

        distance_query = f"""
        MATCH (s:{start_label})-[r:{relationship_type}]->(e:{end_label})
        WITH $input AS input, s, r, e
        ORDER BY apoc.text.levenshteinDistance(input, e.name) ASC
        LIMIT 1
        RETURN s.name AS start_name, e.name AS end_name, r.code AS code
        """
        rows = self.graph.run(distance_query, input=text).data()
        if rows:
            return self._pick_relationship_row(rows, return_field)
        return "未知", "未知"

    def parse_address(self, address):
        """Parse a household address into a "province/city/district" string.

        Each level is fuzzy-matched against the graph; levels that cannot
        be resolved are reported as "未知".
        """
        if not address or address == "未知":
            return "未知/未知/未知"

        # Keep multi-character segments only; single characters are noise.
        segments = [w for w in jieba.cut(address) if len(w) > 1]
        hierarchy = {"province": "未知", "city": "未知", "district": "未知"}

        for word in segments[:2]:
            if word.endswith(('省', '市', '自治区')):
                hierarchy['province'] = self.fuzzy_match("Province", word)
                break

        for word in segments[:2]:
            if word.endswith(('市', '盟')):
                hierarchy['city'] = self.fuzzy_match("City", word)
                break

        if segments:
            # Bug fix: the district is the LAST segment of the address;
            # the first segment is the province and was matched against
            # County by mistake in the original code.
            hierarchy['district'] = self.fuzzy_match("County", segments[-1])

        return f"{hierarchy['province']}/{hierarchy['city']}/{hierarchy['district']}"

    def extract_exam_info(self, text):
        """Extract every registration field from *text*, normalize each one
        against the graph, and return the result as a JSON string.

        Returns a JSON error object when *text* is empty.
        """
        if not text:
            return json.dumps({"error": "输入文本为空"}, ensure_ascii=False)

        # Normalize '。' and whitespace to '，', then guarantee exactly one
        # trailing '，' so the last field always has a terminator for the
        # regexes below (the original "pipei" sentinel failed when the input
        # lacked trailing punctuation).
        text = re.sub(r'[。\s]', '，', text).strip('，') + '，'

        patterns = {
            "exam_type": r"报考类型[是：](.+?)[，]",
            "outside_residency": r"居住地[：是](.+?)[，]",
            "military_service": r"服役地[：是](.+?)[，]",
            "household_location": r"户籍所在地[：是](.+?)[，]",
            "candidate_feature": r"考生特征[：是](.+?)[，]",
            "admission_category": r"招生类别[：是](.+?)[，]",
            "exam_level": r"报考层次[：是](.+?)[，]",
            "exam_subject": r"报考科类[：是](.+?)[，]",
            "study_location": r"学习地点[：是](.+?)[，]",
            "study_mode": r"学习形式[：是](.+?)[，]",
            "exam_site_name": r"报名点[：是](.+?)[，]",
            "postal_code": r"邮编[：是](\d{6})[，]",
            "ck_school": r"志愿院校[：是](.+?)[，]",
            "ck_major": r"意向专业[：是](.+?)[，]",
            "emergency_contact": r"紧急电话[：是](\d{11})[，。]"
        }

        # One regex pass per field (the original ran each search twice).
        extracted = {}
        for key, pattern in patterns.items():
            match = re.search(pattern, text)
            extracted[key] = match.group(1) if match else "未知"

        # Debug output goes to stderr: stdout carries the JSON result that
        # the calling process consumes.
        print("extracted:", extracted, file=sys.stderr)

        if extracted["exam_site_name"] != "未知":
            site_name = self.fuzzy_match("registrationPointName", extracted["exam_site_name"])
            if site_name != "未知":
                # Parameterized lookup of the registration point's details.
                site_query = """
                MATCH (s:registrationPointName)
                WHERE s.name = $name
                RETURN s.code as code, s.address as address, ID(s) as site_id, s.name as name
                LIMIT 1
                """
                site_data = self.graph.run(site_query, name=site_name).data()
                if site_data:
                    extracted.update({
                        "exam_site_code": site_data[0].get('code', '未知'),
                        "exam_site_address": site_data[0].get('address', '未知'),
                        "exam_site_id": site_data[0].get('site_id', '未知'),
                        "exam_site_name": site_data[0].get('name', '未知')
                    })

        # Resolve the volunteer major once; the original issued the same
        # two graph queries twice (once for the name, once for the code).
        major_name, major_code = self.fuzzy_match_with_relationship(
            "volunteerInstitutionName", "Major", "HAS_MAJOR", extracted["ck_major"])

        processed = {
            "exam_type": self.fuzzy_match(self.field_mappings["exam_type"], extracted["exam_type"]),
            "outside_residency": self.fuzzy_match("County", extracted["outside_residency"]),
            "military_service": self.fuzzy_match("County", extracted["military_service"]),
            "household_location": self.parse_address(extracted["household_location"]),
            "candidate_feature": self.fuzzy_match("candidateCharacteristic", extracted["candidate_feature"]),
            "admission_category": self.fuzzy_match("admissionCategory", extracted["admission_category"]),
            "exam_level": self.fuzzy_match("reportingLevel", extracted["exam_level"]),
            "exam_subject": [self.fuzzy_match("reportingCategory", extracted["exam_subject"])],
            "study_location": self.fuzzy_match("studyLocation", extracted["study_location"]),
            "study_mode": self.fuzzy_match("learningForm", extracted["study_mode"]),
            "exam_site_code": extracted.get("exam_site_code", "未知"),
            "exam_site_name": extracted["exam_site_name"],
            "exam_site_address": extracted.get("exam_site_address", "未知"),
            "postal_code": extracted["postal_code"],
            "emergency_contact": extracted["emergency_contact"],
            "ck_school": self.fuzzy_match("volunteerInstitutionName", extracted["ck_school"]),
            "ck_major": major_name,
            "ck_major_code": major_code
        }

        return json.dumps(processed, ensure_ascii=False)


if __name__ == "__main__":
    # Invoked as a subprocess (the original variable name suggests a Java
    # caller); the query text is argv[1] and the JSON result is written to
    # stdout for the caller to parse.
    if len(sys.argv) < 2:
        # Emit a JSON error instead of an IndexError traceback.
        print(json.dumps({"error": "输入文本为空"}, ensure_ascii=False))
        sys.exit(1)
    extractor = ExamInfoExtractor()
    print(extractor.extract_exam_info(sys.argv[1]))