import spacy
from src.common import commonUtils
from src.common.logger import getLogger

logger = getLogger()

class SpacyModel:
    """Wraps Chinese and English spaCy pipelines and extracts entity/relation
    triples from free text.

    The input language is detected via ``commonUtils.robust_detect_language``;
    Chinese text is routed to the zh pipeline, everything else to the English
    pipeline.
    """

    # Default on-disk model locations (local ModelScope downloads), used when
    # the caller does not supply explicit paths.
    DEFAULT_ZH_SPACY_PATH = "D:\\磁盘资料\\LLMModels\\ModelScope\\spacy_english_chinese\\zh_core_web_sm\\zh_core_web_sm-3.7.0"
    DEFAULT_EN_SPACY_PATH = "D:\\磁盘资料\\LLMModels\\ModelScope\\spacy_english_chinese\\en_core_web_sm\\en_core_web_sm-3.7.1"

    def __init__(self, zh_spacy_path=None, en_spacy_path=None):
        """Eagerly load both pipelines.

        :param zh_spacy_path: optional override for the Chinese model path
            (defaults to ``DEFAULT_ZH_SPACY_PATH``).
        :param en_spacy_path: optional override for the English model path
            (defaults to ``DEFAULT_EN_SPACY_PATH``).
        """
        self.zh_spacy_path = zh_spacy_path or self.DEFAULT_ZH_SPACY_PATH
        self.en_spacy_path = en_spacy_path or self.DEFAULT_EN_SPACY_PATH
        self.spacy_zh_model = self.new_zh_spacy_model()
        self.spacy_en_model = self.new_en_spacy_model()

    def new_zh_spacy_model(self):
        """Load and return the Chinese spaCy pipeline from ``self.zh_spacy_path``."""
        zh_nlp = spacy.load(self.zh_spacy_path)
        logger.info(f"SpacyModel new_zh_spacy_model: {self.zh_spacy_path}")
        return zh_nlp

    def new_en_spacy_model(self):
        """Load and return the English spaCy pipeline from ``self.en_spacy_path``."""
        en_nlp = spacy.load(self.en_spacy_path)
        # BUG FIX: previously logged "new_zh_spacy_model" and the Chinese path here.
        logger.info(f"SpacyModel new_en_spacy_model: {self.en_spacy_path}")
        return en_nlp

    def extract_entity_relations(self, text):
        """Detect the language of *text*, run the matching pipeline, and return
        ``(entities, relations)`` as produced by :meth:`extract_text_triples`.

        :param text: raw input string to analyze.
        """
        language = commonUtils.robust_detect_language(text)
        logger.info(f"SpacyModel extract_entity_relations language: {language}")
        # Chinese gets the zh pipeline; any other detected language falls back
        # to the English pipeline.
        spacy_nlp = self.spacy_zh_model if language == "zh" else self.spacy_en_model
        doc = spacy_nlp(text)
        return self.extract_text_triples(doc)

    def extract_text_triples(self, doc):
        """Extract deduplicated entities and subject-verb-object relations.

        :param doc: a processed ``spacy.tokens.Doc``.
        :returns: ``(entities, relations)`` where each entity is a dict with
            ``name``/``type``/``desc`` keys and each relation a dict with
            ``source``/``target``/``type`` keys; both lists keep first-seen
            order and contain no duplicates.
        """
        entities = self._extract_entities(doc)
        logger.info(f"SpacyModel extract_text_triples entities len: {len(entities)}")
        unique_relations = self._extract_relations(doc)
        logger.info(f"SpacyModel extract_text_triples unique_relations len: {len(unique_relations)}")
        return entities, unique_relations

    @staticmethod
    def _extract_entities(doc):
        """Collect named entities, deduplicated by surface text, keeping the
        first containing sentence as a short description."""
        entity_dict = {}  # surface text -> {name, type, desc}
        sent_spans = list(doc.sents)
        for ent in doc.ents:
            if ent.text in entity_dict:
                continue
            # Use the sentence that contains the entity span as its description.
            desc = ""
            for sent in sent_spans:
                if ent.start >= sent.start and ent.end <= sent.end:
                    desc = sent.text.strip()
                    break
            entity_dict[ent.text] = {
                "name": ent.text,
                "type": ent.label_,
                "desc": desc,
            }
        return list(entity_dict.values())

    @staticmethod
    def _extract_relations(doc):
        """Extract deduplicated subject-verb-object relations between entities."""
        # Map token index -> entity surface text for O(1) membership tests.
        token_to_entity = {}
        for ent in doc.ents:
            for i in range(ent.start, ent.end):
                token_to_entity[i] = ent.text

        seen = set()
        unique_relations = []
        for token in doc:
            if token.pos_ != "VERB":
                continue
            subj = None
            obj = None
            # Subject of the verb (active "nsubj" or passive "nsubjpass").
            for child in token.children:
                if child.dep_ in ("nsubj", "nsubjpass") and child.i in token_to_entity:
                    subj = token_to_entity[child.i]
                    break
            # Object of the verb (direct object, prepositional object, attribute).
            for child in token.children:
                if child.dep_ in ("dobj", "pobj", "attr") and child.i in token_to_entity:
                    obj = token_to_entity[child.i]
                    break
            # Record only entity-to-entity relations between distinct entities;
            # dedupe on (source, target, verb lemma) while preserving order.
            if subj and obj and subj != obj:
                key = (subj, obj, token.lemma_)  # lemma_ = verb base form, e.g. "found"
                if key not in seen:
                    seen.add(key)
                    unique_relations.append({
                        "source": subj,
                        "target": obj,
                        "type": token.lemma_,
                    })
        return unique_relations