import jieba
import jieba.posseg as pseg
import re
import regex
import numpy as np
from collections import defaultdict, Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd

class EnhancedKGTripletExtractor:
    """Rule-based + lightweight-heuristic extractor of (subject, relation,
    object) triplets from mixed Chinese/English text for knowledge-graph
    construction."""

    def __init__(self):
        """Load language resources, then set up the ML components.

        Order matters: init_ml_components reads self.stop_words, which is
        defined by load_resources.
        """
        self.load_resources()
        self.init_ml_components()
        
    def load_resources(self):
        """Load the entity dictionary, regex patterns, relation lexicon,
        stop words and pronoun table used by the extractor."""
        # Extended entity dictionary: each entry is registered with jieba at
        # a high frequency so segmentation keeps it as a single token.
        entities = [
            # Chinese entities
            '清华大学', '北京大学', '中国科学院', '哈佛大学', '麻省理工学院',
            '人工智能', '机器学习', '深度学习', '自然语言处理', '计算机视觉',
            '阿里巴巴', '腾讯', '百度', '华为', '小米', '字节跳动',
            '马云', '马化腾', '李彦宏', '任正非', '雷军', '张一鸣',
            '爱因斯坦', '牛顿', '达尔文', '居里夫人', '霍金',
            # English entities
            'Google', 'Microsoft', 'Apple', 'Facebook', 'Amazon', 'Tesla',
            'OpenAI', 'ChatGPT', 'GPT', 'BERT', 'Transformer',
            'Python', 'Java', 'JavaScript', 'C++', 'Go', 'Rust',
            'GitHub', 'Stack Overflow', 'Linux', 'Windows', 'macOS',
            'iPhone', 'Android', 'iOS', 'API', 'SQL', 'NoSQL',
            'Docker', 'Kubernetes', 'AWS', 'Azure', 'GCP'
        ]
        
        # Chinese entries are tagged 'nt' (organisation); 'en' is a custom
        # tag for English entries, not a standard jieba POS tag.
        for entity in entities:
            jieba.add_word(entity, freq=1000, tag='nt' if any('\u4e00' <= char <= '\u9fff' for char in entity) else 'en')
        
        # Regexes for English entity recognition.
        # NOTE(review): these patterns rely on capitalization (e.g. 'acronym'
        # is [A-Z]{2,}) and therefore presumably expect case-sensitive
        # matching — confirm against the matching call sites.
        self.english_patterns = {
            'company': r'\b[A-Z][a-zA-Z]*(?:\s+[A-Z][a-zA-Z]*)*(?:\s+(?:Inc|Corp|Ltd|LLC|Co)\.?)?\b',
            'technology': r'\b[A-Z]{2,}(?:-[A-Z]{2,})*\b|[A-Z][a-z]+(?:[A-Z][a-z]+)*\b',
            'programming': r'\b(?:Python|Java|JavaScript|TypeScript|C\+\+|C#|Go|Rust|PHP|Ruby|Swift|Kotlin|Scala|R|MATLAB)\b',
            'platform': r'\b(?:GitHub|GitLab|Stack\s+Overflow|LinkedIn|Twitter|Facebook|Instagram|YouTube|TikTok)\b',
            'product': r'\b(?:iPhone|iPad|Android|Windows|macOS|Linux|iOS|ChatGPT|GPT-\d+|BERT|Transformer)\b',
            'acronym': r'\b[A-Z]{2,}\b'
        }
        
        # Number and date patterns: years, dates, counts with Chinese
        # measure words, percentages, money amounts, version strings.
        self.numeric_patterns = {
            'year': r'\b(?:19|20)\d{2}年?\b',
            'date': r'\b\d{1,2}月\d{1,2}日?\b|\b\d{4}-\d{1,2}-\d{1,2}\b|\b\d{1,2}/\d{1,2}/\d{4}\b',
            'number': r'\b\d+(?:\.\d+)?(?:[万千百十亿兆]|[kmgtKMGT])?[件个只台套部本篇首条项次人天年月日]\b',
            'percentage': r'\b\d+(?:\.\d+)?%\b',
            'money': r'\b(?:￥|\$|€|£)?\d+(?:\.\d+)?(?:元|美元|欧元|英镑|万|亿)?\b',
            'version': r'\bv?\d+(?:\.\d+)*(?:-[a-z]+\d*)?\b'
        }
        
        # URL, e-mail and bare-domain patterns.
        self.web_patterns = {
            'url': r'https?://[^\s]+|www\.[^\s]+',
            'email': r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b',
            'domain': r'\b[a-zA-Z0-9-]+\.[a-zA-Z]{2,}\b'
        }
        
        # Relation lexicon: surface words that signal an explicit relation,
        # plus the jieba POS flags treated as verbs/adjectives/prepositions.
        self.relation_patterns = {
            'explicit_relations': {
                # Chinese relation words
                '出生', '出生于', '诞生', '诞生于', '生于', '创建', '创立', '成立', '建立',
                '发明', '发现', '创造', '创作', '撰写', '写作', '著作', '编写', '出版', '开发',
                '担任', '就职', '就职于', '工作', '工作于', '任职', '供职', '领导', '管理',
                '毕业', '毕业于', '就读', '就读于', '求学', '深造', '进修', '学习', '研究',
                '来自', '位于', '处于', '坐落', '建于', '设立', '设在', '拥有', '持有',
                '使用', '应用', '采用', '运用', '利用', '基于', '依据', '根据', '属于',
                '包含', '包括', '涵盖', '连接', '链接', '联系', '合作', '协作', '参与',
                # English relation words
                'is', 'was', 'were', 'are', 'created', 'founded', 'developed', 'invented',
                'wrote', 'published', 'worked', 'studied', 'graduated', 'located', 'based',
                'used', 'uses', 'owns', 'acquired', 'merged', 'partnered', 'collaborated',
                'released', 'launched', 'announced', 'supports', 'includes', 'contains'
            },
            'verb_flags': {'v', 'vn', 'vd', 'vg', 'vi', 'vl', 'vf'},
            'adj_flags': {'a', 'ad', 'ag', 'al', 'an'},
            'prep_flags': {'p'}
        }
        
        # Stop words (Chinese and English) excluded from entities/relations.
        self.stop_words = {
            # Chinese stop words
            '的', '了', '和', '与', '及', '在', '是', '有', '我', '你', '他', '她', '它',
            '这', '那', '这个', '那个', '一个', '一些', '很', '非常', '十分', '比较',
            '为了', '因为', '所以', '但是', '而且', '或者', '如果', '虽然', '然而',
            '也', '还', '都', '就', '只', '才', '又', '再', '更', '最', '会', '能',
            # English stop words
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
            'of', 'with', 'by', 'this', 'that', 'these', 'those', 'i', 'you', 'he',
            'she', 'it', 'we', 'they', 'is', 'are', 'was', 'were', 'be', 'been',
            'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
            'should', 'may', 'might', 'can', 'must'
        }
        
        # Pronouns eligible for coreference resolution; the values are unused
        # placeholders (a concrete antecedent is chosen at runtime).
        self.pronoun_mapping = {
            '他': None, '她': None, '它': None, '这': None, '那': None,
            'he': None, 'she': None, 'it': None, 'this': None, 'that': None
        }
    
    def init_ml_components(self):
        """Set up the machine-learning helpers used by the extractor."""
        # TF-IDF vectoriser used for relation-similarity computations.
        vectorizer_options = {
            'max_features': 1000,
            'stop_words': list(self.stop_words),
            'ngram_range': (1, 2),
            'min_df': 1,
        }
        self.tfidf_vectorizer = TfidfVectorizer(**vectorizer_options)

        # Storage for relation patterns learned at runtime.
        self.learned_patterns = defaultdict(list)
        self.relation_clusters = None
        
    def extract_triplets(self, text, normalize_relations=True, use_ml_enhancement=True):
        """Extract (subject, relation, object) triplets from free text.

        :param text: input text
        :param normalize_relations: map surface relation words to canonical labels
        :param use_ml_enhancement: additionally run the ML-based extractor
        :return: de-duplicated, quality-filtered list of triplets
        """
        cleaned = self.preprocess_text(text)

        collected = []
        for paragraph in self.split_paragraphs(cleaned):
            # Entities are detected once per paragraph and shared by all of
            # its sentences.
            entities = self.extract_enhanced_entities(paragraph)
            sentences = self.split_sentences(paragraph)

            for idx, sentence in enumerate(sentences):
                # Rule-based extraction, then pattern-based enhancement,
                # then the optional ML pass — same order as the candidates
                # are accumulated.
                candidates = self.extract_base_triplets(sentence, normalize_relations)
                candidates += self.extract_enhanced_triplets(sentence, entities, normalize_relations)
                if use_ml_enhancement:
                    candidates += self.ml_enhanced_extraction(sentence, entities, normalize_relations)

                # Replace pronouns with concrete entities before collecting.
                collected += self.resolve_references(candidates, entities, idx, sentences)

        # De-duplicate and drop low-quality triplets.
        return self.post_process_triplets(collected)
    
    def preprocess_text(self, text):
        """Normalise whitespace, quotes and dashes before extraction."""
        # Collapse whitespace runs into single spaces and trim the ends.
        cleaned = re.sub(r'\s+', ' ', text).strip()
        # Fold curly quotes / backticks into a plain double quote, and long
        # dashes into a hyphen.
        cleaned = re.sub(r'[""''`]', '"', cleaned)
        return re.sub(r'[—–]', '-', cleaned)
    
    def extract_enhanced_entities(self, text):
        """Extract entities from mixed Chinese/English text.

        Four sources are combined: jieba POS tagging, English regex
        patterns, numeric/date patterns and web (URL/email/domain)
        patterns. Results are de-duplicated by surface text (earliest
        mention wins) and returned sorted by position.

        :param text: input text (one paragraph)
        :return: list of dicts with keys 'text', 'type', 'position', 'source'
        """
        entities = []

        # 1. Chinese entity recognition via jieba POS tagging.
        # Fix: keep a running cursor so a word that occurs several times is
        # assigned the position of the current token, not always the first
        # occurrence returned by text.find(word).
        cursor = 0
        for word, flag in pseg.cut(text):
            position = text.find(word, cursor)
            if position == -1:
                position = text.find(word)  # defensive fallback
            else:
                cursor = position + len(word)
            if self.is_valid_entity(word, flag):
                entities.append({
                    'text': word,
                    'type': self.classify_entity_type(word, flag),
                    'position': position,
                    'source': 'jieba'
                })

        # 2. English entity recognition.
        # Fix: these patterns encode capitalization on purpose (e.g.
        # 'acronym' is [A-Z]{2,}); matching them with IGNORECASE made them
        # fire on almost every word, so they are matched case-sensitively.
        for pattern_name, pattern in self.english_patterns.items():
            for match in regex.finditer(pattern, text):
                entity_text = match.group().strip()
                if len(entity_text) > 1 and entity_text not in self.stop_words:
                    entities.append({
                        'text': entity_text,
                        'type': f'english_{pattern_name}',
                        'position': match.start(),
                        'source': 'regex_english'
                    })

        # 3. Numbers, dates, percentages, money amounts and versions.
        for pattern_name, pattern in self.numeric_patterns.items():
            for match in regex.finditer(pattern, text):
                entities.append({
                    'text': match.group(),
                    'type': f'numeric_{pattern_name}',
                    'position': match.start(),
                    'source': 'regex_numeric'
                })

        # 4. URLs, e-mail addresses and bare domains (case genuinely does
        # not matter here, so IGNORECASE stays).
        for pattern_name, pattern in self.web_patterns.items():
            for match in regex.finditer(pattern, text, regex.IGNORECASE):
                entities.append({
                    'text': match.group(),
                    'type': f'web_{pattern_name}',
                    'position': match.start(),
                    'source': 'regex_web'
                })

        # De-duplicate by surface text, keeping the earliest mention.
        unique_entities = []
        seen_texts = set()
        for entity in sorted(entities, key=lambda x: x['position']):
            if entity['text'] not in seen_texts:
                unique_entities.append(entity)
                seen_texts.add(entity['text'])

        return unique_entities
    
    def is_valid_entity(self, word, flag):
        """Return True when the (word, POS flag) pair looks like a usable entity."""
        # Too short or a stop word: reject immediately.
        if len(word) < 2:
            return False
        if word in self.stop_words:
            return False
        # Chinese entities: any noun-like POS tag.
        if flag in {'nr', 'ns', 'nt'} or flag.startswith('n'):
            return True
        # English entities: the custom 'en' tag, or a pure-ASCII-letter token.
        return flag == 'en' or bool(regex.match(r'^[A-Za-z]+$', word))
    
    def classify_entity_type(self, word, flag):
        """Map a jieba POS flag (plus the word itself) to an entity category."""
        specific = {'nr': 'person', 'ns': 'location', 'nt': 'organization'}
        if flag in specific:
            return specific[flag]
        if flag.startswith('n'):
            return 'noun'
        # The custom 'en' tag, or any pure-ASCII-letter word, counts as English.
        if flag == 'en' or regex.match(r'^[A-Za-z]+$', word):
            return 'english'
        return 'other'
    
    def extract_base_triplets(self, sentence, normalize_relations):
        """Run the four rule-based extractors over one POS-tagged sentence."""
        tagged = [(token.word, token.flag) for token in pseg.cut(sentence)]

        extractors = (
            self.extract_explicit_relations,   # explicit relation words
            self.extract_verb_relations,       # verb-mediated relations
            self.extract_is_relations,         # copula ("是") sentences
            self.extract_prep_relations,       # prepositional phrases
        )
        triplets = []
        for extract in extractors:
            triplets.extend(extract(tagged, normalize_relations))
        return triplets
    
    def extract_enhanced_triplets(self, sentence, entities, normalize_relations):
        """Combine co-occurrence, syntactic and semantic extraction results."""
        names = [entity['text'] for entity in entities]

        results = []
        results += self.extract_cooccurrence_relations(sentence, names, normalize_relations)
        results += self.extract_syntactic_relations(sentence, names, normalize_relations)
        results += self.extract_semantic_relations(sentence, names, normalize_relations)
        return results
    
    def extract_cooccurrence_relations(self, sentence, entities, normalize_relations):
        """Infer relations for every pair of entities present in the sentence.

        For each pair, the text between the two (first) mentions is used to
        guess a connecting relation; the left-most mention becomes the subject.
        """
        present = [name for name in entities if name in sentence]
        triplets = []

        for i, first in enumerate(present):
            for second in present[i + 1:]:
                pos_a = sentence.find(first)
                pos_b = sentence.find(second)
                if pos_a == -1 or pos_b == -1:
                    continue

                if pos_a < pos_b:
                    subject, obj = first, second
                    gap = sentence[pos_a + len(first):pos_b]
                else:
                    subject, obj = second, first
                    gap = sentence[pos_b + len(second):pos_a]

                relation = self.infer_relation_from_context(gap.strip(), normalize_relations)
                if relation and subject != obj:
                    triplets.append((subject, relation, obj))

        return triplets
    
    def extract_syntactic_relations(self, sentence, entities, normalize_relations):
        """Extract triplets from a few fixed Chinese surface patterns."""
        # (pattern, canonical relation, raw relation word)
        rules = [
            (r'([^，。；！？]+?)的([^，。；！？]+)', 'belongs_to', '的'),   # A的B
            (r'([^，。；！？]+?)和([^，。；！？]+)', 'and', '和'),          # A和B
            (r'([^，。；！？]+?)与([^，。；！？]+)', 'with', '与'),         # A与B
            (r'([^，。；！？]+?)在([^，。；！？]+)', 'located_in', '在'),   # A在B
            (r'([^，。；！？]+?)从([^，。；！？]+)', 'from', '从'),         # A从B
        ]

        triplets = []
        for pattern, canonical, raw in rules:
            relation = canonical if normalize_relations else raw
            for match in re.finditer(pattern, sentence):
                subject = match.group(1).strip()
                obj = match.group(2).strip()
                # Both sides must contain a known entity and must differ.
                has_subject = any(name in subject for name in entities)
                has_object = any(name in obj for name in entities)
                if has_subject and has_object and subject != obj:
                    triplets.append((subject, relation, obj))
        return triplets
    
    def extract_semantic_relations(self, sentence, entities, normalize_relations):
        """Attach years, counts and version numbers to the nearest entity."""
        rules = (
            (r'在(\d{4}年?)', 'in_year'),               # temporal
            (r'于(\d{4}年?)', 'in_year'),
            (r'有(\d+[个件只台套])', 'has_count'),       # quantity
            (r'包含(\d+[个件只台套])', 'contains_count'),
            (r'版本(v?\d+(?:\.\d+)*)', 'version'),       # version
            (r'(v?\d+(?:\.\d+)*)版本?', 'version')
        )

        triplets = []
        for pattern, canonical in rules:
            for match in re.finditer(pattern, sentence):
                # The entity mentioned closest to the match becomes the subject.
                subject = self.find_nearest_entity_to_position(sentence, entities, match.start())
                if not subject:
                    continue
                relation = canonical if normalize_relations else match.group(0)
                triplets.append((subject, relation, match.group(1)))
        return triplets
    
    def ml_enhanced_extraction(self, sentence, entities, normalize_relations):
        """Best-effort ML-based relation extraction.

        Any failure is swallowed on purpose: the rule-based extractors are
        the primary path and must never be blocked by the ML add-on.
        """
        results = []
        try:
            features = self.extract_sentence_features(sentence)
            candidates = self.infer_relations_by_similarity(sentence, entities, features)
            for subject, relation, obj in candidates:
                if normalize_relations:
                    relation = self.normalize_ml_relation(relation)
                results.append((subject, relation, obj))
        except Exception:
            # Deliberately broad: the ML enhancement is optional.
            pass
        return results
    
    def extract_sentence_features(self, sentence):
        """Compute simple surface features of a sentence for the ML path."""
        return {
            'length': len(sentence),
            'word_count': len(sentence.split()),
            'has_english': re.search(r'[A-Za-z]', sentence) is not None,
            'has_numbers': re.search(r'\d', sentence) is not None,
            'has_punctuation': re.search(r'[，。；！？]', sentence) is not None,
        }
    
    def infer_relations_by_similarity(self, sentence, entities, features):
        """Placeholder for similarity-based relation inference.

        A real implementation could compare TF-IDF vectors of entity
        contexts; for now no ML-derived relations are produced.
        """
        return []
    
    def find_nearest_entity_to_position(self, sentence, entities, position):
        """Return the entity whose first mention is closest to *position*.

        Ties keep the earlier candidate in *entities*; entities absent from
        the sentence are ignored.
        """
        best = None
        best_distance = None
        for candidate in entities:
            start = sentence.find(candidate)
            if start == -1:
                continue
            distance = abs(start - position)
            if best_distance is None or distance < best_distance:
                best, best_distance = candidate, distance
        return best
    
    def infer_relation_from_context(self, context, normalize_relations):
        """Guess the relation expressed by the text between two entities."""
        context = context.strip()
        if not context:
            # Nothing between the entities: fall back to a generic relation.
            return 'related_to' if normalize_relations else '相关'

        # Prefer an explicit relation word when one occurs in the context.
        for candidate in self.relation_patterns['explicit_relations']:
            if candidate in context:
                return self.normalize_relation(candidate) if normalize_relations else candidate

        # Otherwise fall back to a few high-frequency connective keywords.
        keyword_rules = (
            (('的', "'s", 'of'), 'belongs_to', '的'),
            (('和', 'and', '与', 'with'), 'and', '和'),
            (('在', 'in', 'at'), 'located_in', '在'),
        )
        for keywords, canonical, raw in keyword_rules:
            if any(word in context for word in keywords):
                return canonical if normalize_relations else raw

        return 'related_to' if normalize_relations else '相关'
    
    def split_paragraphs(self, text):
        """Split text into paragraphs on blank lines or wide whitespace gaps.

        Fragments of 10 characters or fewer are discarded as noise.
        """
        chunks = re.split(r'\n\s*\n|\s{3,}', text)
        return [chunk.strip() for chunk in chunks if len(chunk.strip()) > 10]
    
    def split_sentences(self, text):
        """Split mixed Chinese/English text into sentences.

        First splits on CJK/ASCII terminators and newlines, then on English
        full stops (keeping dotted tokens such as versions or domains
        intact). Fragments of three characters or fewer are dropped.
        """
        coarse = [part.strip() for part in re.split(r'[。！？!?;；]\s*|[\n]\s*', text)]
        coarse = [part for part in coarse if len(part) > 3]

        final = []
        for fragment in coarse:
            # r'\.(?!\w)' keeps periods followed by a word character,
            # e.g. inside 'v1.2' or 'example.com'.
            for piece in re.split(r'\.(?!\w)', fragment):
                piece = piece.strip()
                if len(piece) > 3:
                    final.append(piece)
        return final
    
    def post_process_triplets(self, triplets):
        """De-duplicate triplets, then drop the low-quality ones."""
        return [
            triplet
            for triplet in self.deduplicate_triplets(triplets)
            if self.is_valid_triplet(*triplet)
        ]
    
    def is_valid_triplet(self, subject, relation, obj):
        """Quality gate: reject empty, self-referential or stop-word triplets."""
        # All three parts must be truthy (guards None/'' in any slot).
        if not (subject and relation and obj):
            return False
        # Degenerate lengths.
        if len(subject) < 1 or len(obj) < 1:
            return False
        # A triplet must connect two different things.
        if subject == obj:
            return False
        # Pure stop words make meaningless endpoints.
        return subject not in self.stop_words and obj not in self.stop_words
    
    def resolve_references(self, triplets, entities, sentence_idx, sentences):
        """Replace pronouns in triplets with concrete entity mentions."""
        resolved = []
        for subject, relation, obj in triplets:
            new_subject = self.resolve_pronoun(subject, entities, sentence_idx, sentences)
            new_obj = self.resolve_pronoun(obj, entities, sentence_idx, sentences)
            # Drop the triplet when resolution failed or collapsed both sides.
            if not new_subject or not new_obj or new_subject == new_obj:
                continue
            resolved.append((new_subject, relation, new_obj))
        return resolved
    
    def resolve_pronoun(self, entity, entities, sentence_idx, sentences):
        """Resolve a pronoun to an entity mention.

        Very simple heuristic: a known pronoun maps to the FIRST entry of
        *entities* (entries may be dicts or plain strings); anything else is
        returned unchanged. Unresolvable pronouns yield None.
        """
        is_pronoun = isinstance(entity, str) and entity in self.pronoun_mapping
        if not is_pronoun:
            return entity
        if not entities:
            return None
        texts = [item['text'] if isinstance(item, dict) else item for item in entities]
        return texts[0] if texts else None
    
    def deduplicate_triplets(self, triplets):
        """Strip whitespace, drop triplets with empty parts and de-duplicate.

        Order of first appearance is preserved.
        """
        unique = []
        seen = set()
        for raw in triplets:
            cleaned = tuple(str(part).strip() for part in raw)
            if cleaned in seen or not all(cleaned):
                continue
            seen.add(cleaned)
            unique.append(cleaned)
        return unique
    
    # Rule-based methods carried over from the original extractor, adapted here
    def extract_explicit_relations(self, words_list, normalize_relations=True):
        """Extract triplets anchored on explicit relation words."""
        known_relations = self.relation_patterns['explicit_relations']

        triplets = []
        for index, (word, _flag) in enumerate(words_list):
            if word not in known_relations:
                continue
            subject = self.extract_subject(words_list, index)
            obj = self.extract_object(words_list, index)
            if not subject or not obj or subject == obj:
                continue
            relation = self.normalize_relation(word) if normalize_relations else word
            triplets.append((subject, relation, obj))
        return triplets
    
    def extract_verb_relations(self, words_list, normalize_relations=True):
        """Extract triplets whose relation is a (non stop-word) verb."""
        verb_flags = self.relation_patterns['verb_flags']

        triplets = []
        for index, (word, flag) in enumerate(words_list):
            if flag not in verb_flags or word in self.stop_words:
                continue
            subject = self.find_subject_for_verb(words_list, index)
            obj = self.find_object_for_verb(words_list, index)
            if subject and obj and subject != obj:
                relation = self.normalize_verb_relation(word) if normalize_relations else word
                triplets.append((subject, relation, obj))
        return triplets
    
    def extract_is_relations(self, words_list, normalize_relations=True):
        """Extract copula ("is a") triplets from Chinese and English cues."""
        copulas = {'是', '为', '乃', '即', '系', '属', 'is', 'are', 'was', 'were'}

        triplets = []
        for index, (word, _flag) in enumerate(words_list):
            if word not in copulas:
                continue
            subject = self.extract_subject(words_list, index)
            obj = self.extract_object(words_list, index)
            if subject and obj and subject != obj:
                triplets.append((subject, 'is_a' if normalize_relations else word, obj))
        return triplets
    
    def extract_prep_relations(self, words_list, normalize_relations=True):
        """Extract triplets mediated by prepositions (Chinese and English)."""
        prep_words = {'在', '于', '从', '自', '由', '向', '朝', '往', '到', '至', 
                     '与', '和', '同', '跟', 'in', 'at', 'on', 'from', 'to', 'with'}
        prep_flags = self.relation_patterns['prep_flags']

        triplets = []
        for index, (word, flag) in enumerate(words_list):
            if word not in prep_words and flag not in prep_flags:
                continue
            # Subject: nearest entity to the left; object: nearest to the right.
            subject = self.find_nearest_entity(words_list, index, direction='left')
            obj = self.find_nearest_entity(words_list, index, direction='right')
            if subject and obj and subject != obj:
                relation = self.normalize_prep_relation(word) if normalize_relations else word
                triplets.append((subject, relation, obj))
        return triplets
    
    def find_subject_for_verb(self, words_list, verb_pos):
        """Scan left from the verb for the nearest entity, stopping at commas."""
        index = verb_pos - 1
        while index >= 0:
            word, flag = words_list[index]
            index -= 1
            if word in self.stop_words:
                continue
            if self.is_entity_flag(word, flag):
                return word
            if word in {'，', ',', '；', ';'}:
                break
        return None
    
    def find_object_for_verb(self, words_list, verb_pos):
        """Scan right from the verb for the nearest entity, stopping at commas."""
        for word, flag in words_list[verb_pos + 1:]:
            if word in self.stop_words:
                continue
            if self.is_entity_flag(word, flag):
                return word
            if word in {'，', ',', '；', ';'}:
                break
        return None
    
    def find_nearest_entity(self, words_list, pos, direction='right'):
        """Find the entity token nearest to *pos*, scanning in *direction*.

        Stop words are skipped; scanning halts at clause punctuation.
        """
        if direction == 'right':
            indices = range(pos + 1, len(words_list))
        else:
            indices = range(pos - 1, -1, -1)

        stoppers = {'，', ',', '；', ';', '。', '.'}
        for index in indices:
            word, flag = words_list[index]
            if word in self.stop_words:
                continue
            if self.is_entity_flag(word, flag):
                return word
            if word in stoppers:
                break
        return None
    
    def is_entity_flag(self, word, flag):
        """True when the POS flag marks a noun-like or English entity token."""
        # Single characters and stop words never count as entities.
        if len(word) <= 1 or word in self.stop_words:
            return False
        return flag.startswith('n') or flag in {'nr', 'ns', 'nt', 'en'}
    
    def extract_subject(self, words_list, rel_pos):
        """Collect the contiguous run of noun tokens left of the relation word.

        Scanning walks leftwards and stops at the first stop word or
        non-noun token; collected tokens are concatenated in text order.
        """
        parts = []
        index = rel_pos - 1
        while index >= 0:
            word, flag = words_list[index]
            if word in self.stop_words:
                break
            if not (flag.startswith('n') or flag in {'nr', 'ns', 'nt', 'en'}):
                break
            parts.append(word)
            index -= 1
        if not parts:
            return None
        parts.reverse()
        return ''.join(parts)
    
    def extract_object(self, words_list, rel_pos):
        """Collect the contiguous run of noun tokens right of the relation word.

        Scanning stops at the first stop word or non-noun token.
        """
        parts = []
        for word, flag in words_list[rel_pos + 1:]:
            if word in self.stop_words:
                break
            if not (flag.startswith('n') or flag in {'nr', 'ns', 'nt', 'en'}):
                break
            parts.append(word)
        return ''.join(parts) if parts else None
    
    def normalize_relation(self, relation_word):
        """Map a surface relation word to a canonical relation label.

        Unknown words fall back to the generic 'related_to'.
        """
        chinese = {
            '出生': 'born_in', '出生于': 'born_in', '诞生': 'born_in', '诞生于': 'born_in', '生于': 'born_in',
            '毕业': 'graduated_from', '毕业于': 'graduated_from', '就读': 'studied_at', '就读于': 'studied_at',
            '工作': 'works_at', '工作于': 'works_at', '就职': 'works_at', '就职于': 'works_at', '任职': 'works_at',
            '担任': 'works_as', '负责': 'responsible_for', '领导': 'leads', '管理': 'manages',
            '位于': 'located_in', '处于': 'located_in', '坐落': 'located_in', '建于': 'located_in',
            '发明': 'invented', '发现': 'discovered', '创造': 'created', '创作': 'created', '撰写': 'wrote',
            '开发': 'developed', '研发': 'developed', '创建': 'created', '建立': 'established',
            '使用': 'uses', '应用': 'applies', '采用': 'adopts', '运用': 'utilizes',
            '拥有': 'owns', '持有': 'holds', '具有': 'has', '包含': 'contains',
            '属于': 'belongs_to', '来自': 'from', '基于': 'based_on', '依据': 'based_on',
        }
        english = {
            'created': 'created', 'founded': 'founded', 'developed': 'developed', 'invented': 'invented',
            'wrote': 'wrote', 'published': 'published', 'worked': 'worked_at', 'studied': 'studied_at',
            'graduated': 'graduated_from', 'located': 'located_in', 'based': 'based_on',
            'used': 'uses', 'owns': 'owns', 'acquired': 'acquired', 'merged': 'merged_with',
            'partnered': 'partnered_with', 'collaborated': 'collaborated_with',
            'released': 'released', 'launched': 'launched', 'announced': 'announced',
        }
        mapping = {**chinese, **english}
        return mapping.get(relation_word, 'related_to')
    
    def normalize_verb_relation(self, verb):
        """Canonicalise common Chinese verbs.

        Verbs without a canonical form get a '_relation' suffix so they stay
        distinguishable in the graph.
        """
        verb_mapping = {
            '是': 'is', '为': 'is', '成为': 'became', '变成': 'became',
            '做': 'does', '进行': 'conducts', '执行': 'executes',
            '去': 'goes_to', '来': 'comes_from', '到': 'arrives_at',
            '买': 'buys', '卖': 'sells', '送': 'gives', '收': 'receives',
            '说': 'says', '讲': 'speaks', '谈': 'talks', '告诉': 'tells',
            '看': 'sees', '听': 'hears', '想': 'thinks', '知道': 'knows',
            '喜欢': 'likes', '爱': 'loves', '帮助': 'helps', '支持': 'supports',
        }
        try:
            return verb_mapping[verb]
        except KeyError:
            return verb + '_relation'
    
    def normalize_prep_relation(self, prep):
        """Canonicalise prepositions; unknown ones get a '_relation' suffix."""
        prep_mapping = {
            '在': 'at', '于': 'at', '从': 'from', '自': 'from', '由': 'by',
            '向': 'towards', '朝': 'towards', '往': 'to', '到': 'to', '至': 'to',
            '与': 'with', '和': 'with', '同': 'with', '跟': 'with',
            'in': 'in', 'at': 'at', 'on': 'on', 'from': 'from', 'to': 'to', 'with': 'with',
        }
        fallback = prep + '_relation'
        return prep_mapping.get(prep, fallback)
    
    def normalize_ml_relation(self, relation):
        """Pass ML-derived relations through; empty/None become 'related_to'."""
        if relation:
            return relation
        return 'related_to'

# Module-level singleton, built at import time. NOTE(review): constructing it
# mutates jieba's global dictionary and builds sklearn components as an
# import-time side effect — importing this module is therefore not free.
kg_extractor = EnhancedKGTripletExtractor()