#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
知识图谱构建器

实现从文档中提取实体、关系，并构建知识图谱的功能。
支持多种实体识别和关系抽取方法。
"""

import re
import logging
from typing import Dict, List, Any, Optional, Tuple, Set
from dataclasses import dataclass
from collections import defaultdict
import asyncio
from datetime import datetime

logger = logging.getLogger(__name__)

@dataclass
class Entity:
    """实体类"""
    name: str
    entity_type: str
    confidence: float
    start_pos: int
    end_pos: int
    context: str
    properties: Dict[str, Any] = None
    
    def __post_init__(self):
        if self.properties is None:
            self.properties = {}

@dataclass
class Relation:
    """关系类"""
    subject: str
    predicate: str
    object: str
    confidence: float
    context: str
    properties: Dict[str, Any] = None
    
    def __post_init__(self):
        if self.properties is None:
            self.properties = {}

@dataclass
class KnowledgeGraph:
    """知识图谱类"""
    entities: List[Entity]
    relations: List[Relation]
    metadata: Dict[str, Any]
    
    def __post_init__(self):
        if self.metadata is None:
            self.metadata = {}

class EntityExtractor:
    """实体抽取器"""
    
    def __init__(self):
        # 预定义实体类型和模式
        self.entity_patterns = {
            'PERSON': [
                r'[\u4e00-\u9fff]{2,4}(?:先生|女士|教授|博士|老师|同学)',
                r'(?:张|王|李|刘|陈|杨|赵|黄|周|吴|徐|孙|胡|朱|高|林|何|郭|马|罗|梁|宋|郑|谢|韩|唐|冯|于|董|萧|程|曹|袁|邓|许|傅|沈|曾|彭|吕|苏|卢|蒋|蔡|贾|丁|魏|薛|叶|阎|余|潘|杜|戴|夏|钟|汪|田|任|姜|范|方|石|姚|谭|廖|邹|熊|金|陆|郝|孔|白|崔|康|毛|邱|秦|江|史|顾|侯|邵|孟|龙|万|段|漕|钱|汤|尹|黎|易|常|武|乔|贺|赖|龚|文)[\u4e00-\u9fff]{1,3}',
            ],
            'ORGANIZATION': [
                r'[\u4e00-\u9fff]+(?:公司|企业|集团|机构|组织|部门|学校|大学|学院|医院|银行|政府)',
                r'(?:中国|北京|上海|广州|深圳|杭州|南京|武汉|成都|西安|重庆)[\u4e00-\u9fff]*(?:公司|大学|学院|医院|银行)',
            ],
            'LOCATION': [
                r'[\u4e00-\u9fff]+(?:省|市|县|区|镇|村|街道|路|街|巷|号)',
                r'(?:北京|上海|天津|重庆|河北|山西|辽宁|吉林|黑龙江|江苏|浙江|安徽|福建|江西|山东|河南|湖北|湖南|广东|海南|四川|贵州|云南|陕西|甘肃|青海|台湾|内蒙古|广西|西藏|宁夏|新疆|香港|澳门)',
            ],
            'TIME': [
                r'\d{4}年\d{1,2}月\d{1,2}日',
                r'\d{4}-\d{1,2}-\d{1,2}',
                r'\d{1,2}月\d{1,2}日',
                r'(?:今天|明天|昨天|前天|后天)',
                r'(?:上午|下午|晚上|凌晨)\d{1,2}[：:]\d{2}',
            ],
            'CONCEPT': [
                r'[\u4e00-\u9fff]+(?:理论|概念|方法|技术|算法|模型|系统|框架|架构)',
                r'(?:人工智能|机器学习|深度学习|神经网络|自然语言处理|计算机视觉|数据挖掘|大数据)',
            ],
            'PRODUCT': [
                r'[\u4e00-\u9fff]+(?:产品|软件|系统|平台|工具|设备|装置)',
            ]
        }
        
        # 编译正则表达式
        self.compiled_patterns = {}
        for entity_type, patterns in self.entity_patterns.items():
            self.compiled_patterns[entity_type] = [
                re.compile(pattern) for pattern in patterns
            ]
    
    async def extract_entities(self, text: str, context: str = "") -> List[Entity]:
        """
        从文本中提取实体
        
        Args:
            text: 输入文本
            context: 上下文信息
            
        Returns:
            实体列表
        """
        entities = []
        
        for entity_type, patterns in self.compiled_patterns.items():
            for pattern in patterns:
                matches = pattern.finditer(text)
                for match in matches:
                    entity_name = match.group().strip()
                    if len(entity_name) > 1:  # 过滤单字符实体
                        entity = Entity(
                            name=entity_name,
                            entity_type=entity_type,
                            confidence=0.8,  # 基于规则的置信度
                            start_pos=match.start(),
                            end_pos=match.end(),
                            context=context,
                            properties={
                                'extraction_method': 'regex',
                                'pattern_matched': pattern.pattern
                            }
                        )
                        entities.append(entity)
        
        # 去重和过滤
        entities = self._deduplicate_entities(entities)
        
        return entities
    
    def _deduplicate_entities(self, entities: List[Entity]) -> List[Entity]:
        """
        实体去重
        
        Args:
            entities: 原始实体列表
            
        Returns:
            去重后的实体列表
        """
        seen = set()
        deduplicated = []
        
        for entity in entities:
            # 使用名称和类型作为去重键
            key = (entity.name.lower(), entity.entity_type)
            if key not in seen:
                seen.add(key)
                deduplicated.append(entity)
            else:
                # 如果重复，保留置信度更高的
                for i, existing in enumerate(deduplicated):
                    if (existing.name.lower(), existing.entity_type) == key:
                        if entity.confidence > existing.confidence:
                            deduplicated[i] = entity
                        break
        
        return deduplicated

class RelationExtractor:
    """关系抽取器"""
    
    def __init__(self):
        # 预定义关系模式
        self.relation_patterns = {
            'WORK_FOR': [
                r'([\u4e00-\u9fff]+)(?:在|于)([\u4e00-\u9fff]+(?:公司|企业|机构))(?:工作|任职)',
                r'([\u4e00-\u9fff]+)(?:是|为)([\u4e00-\u9fff]+(?:公司|企业))(?:的)?(?:员工|职员|经理|总监)',
            ],
            'LOCATED_IN': [
                r'([\u4e00-\u9fff]+(?:公司|企业|机构))(?:位于|在)([\u4e00-\u9fff]+(?:省|市|县|区))',
                r'([\u4e00-\u9fff]+)(?:坐落于|设在)([\u4e00-\u9fff]+(?:省|市|县|区))',
            ],
            'PART_OF': [
                r'([\u4e00-\u9fff]+)(?:属于|隶属于)([\u4e00-\u9fff]+)',
                r'([\u4e00-\u9fff]+)(?:是)([\u4e00-\u9fff]+)(?:的一部分|的组成部分)',
            ],
            'RELATED_TO': [
                r'([\u4e00-\u9fff]+)(?:与|和)([\u4e00-\u9fff]+)(?:相关|有关)',
                r'([\u4e00-\u9fff]+)(?:涉及|包含)([\u4e00-\u9fff]+)',
            ],
            'CREATED_BY': [
                r'([\u4e00-\u9fff]+)(?:由|被)([\u4e00-\u9fff]+)(?:创建|开发|设计)',
                r'([\u4e00-\u9fff]+)(?:是)([\u4e00-\u9fff]+)(?:创造的|开发的)',
            ],
            'HAPPENED_AT': [
                r'([\u4e00-\u9fff]+)(?:发生在|在)(\d{4}年|\d{1,2}月)',
                r'(\d{4}年|\d{1,2}月)([\u4e00-\u9fff]+)(?:发生|举行)',
            ]
        }
        
        # 编译正则表达式
        self.compiled_patterns = {}
        for relation_type, patterns in self.relation_patterns.items():
            self.compiled_patterns[relation_type] = [
                re.compile(pattern) for pattern in patterns
            ]
    
    async def extract_relations(self, text: str, entities: List[Entity], 
                              context: str = "") -> List[Relation]:
        """
        从文本中提取关系
        
        Args:
            text: 输入文本
            entities: 已识别的实体列表
            context: 上下文信息
            
        Returns:
            关系列表
        """
        relations = []
        
        # 基于模式的关系抽取
        pattern_relations = await self._extract_pattern_relations(text, context)
        relations.extend(pattern_relations)
        
        # 基于实体共现的关系抽取
        cooccurrence_relations = await self._extract_cooccurrence_relations(
            text, entities, context
        )
        relations.extend(cooccurrence_relations)
        
        # 去重和过滤
        relations = self._deduplicate_relations(relations)
        
        return relations
    
    async def _extract_pattern_relations(self, text: str, context: str) -> List[Relation]:
        """
        基于模式的关系抽取
        
        Args:
            text: 输入文本
            context: 上下文
            
        Returns:
            关系列表
        """
        relations = []
        
        for relation_type, patterns in self.compiled_patterns.items():
            for pattern in patterns:
                matches = pattern.finditer(text)
                for match in matches:
                    groups = match.groups()
                    if len(groups) >= 2:
                        subject = groups[0].strip()
                        obj = groups[1].strip()
                        
                        if subject and obj and subject != obj:
                            relation = Relation(
                                subject=subject,
                                predicate=relation_type,
                                object=obj,
                                confidence=0.7,
                                context=context,
                                properties={
                                    'extraction_method': 'pattern',
                                    'pattern_matched': pattern.pattern,
                                    'match_text': match.group()
                                }
                            )
                            relations.append(relation)
        
        return relations
    
    async def _extract_cooccurrence_relations(self, text: str, entities: List[Entity],
                                            context: str) -> List[Relation]:
        """
        基于实体共现的关系抽取
        
        Args:
            text: 输入文本
            entities: 实体列表
            context: 上下文
            
        Returns:
            关系列表
        """
        relations = []
        
        # 计算实体间的距离，如果距离较近则认为可能存在关系
        for i, entity1 in enumerate(entities):
            for j, entity2 in enumerate(entities[i+1:], i+1):
                distance = abs(entity1.start_pos - entity2.start_pos)
                
                # 如果两个实体距离较近（在同一句话内）
                if distance < 100:  # 假设句子长度不超过100字符
                    # 提取两个实体之间的文本
                    start_pos = min(entity1.start_pos, entity2.start_pos)
                    end_pos = max(entity1.end_pos, entity2.end_pos)
                    between_text = text[start_pos:end_pos]
                    
                    # 简单的关系推断
                    relation_type = self._infer_relation_type(
                        entity1, entity2, between_text
                    )
                    
                    if relation_type:
                        relation = Relation(
                            subject=entity1.name,
                            predicate=relation_type,
                            object=entity2.name,
                            confidence=0.5,  # 共现关系置信度较低
                            context=context,
                            properties={
                                'extraction_method': 'cooccurrence',
                                'distance': distance,
                                'between_text': between_text
                            }
                        )
                        relations.append(relation)
        
        return relations
    
    def _infer_relation_type(self, entity1: Entity, entity2: Entity, 
                           between_text: str) -> Optional[str]:
        """
        推断实体间的关系类型
        
        Args:
            entity1: 第一个实体
            entity2: 第二个实体
            between_text: 实体间的文本
            
        Returns:
            关系类型或None
        """
        # 基于实体类型和中间文本推断关系
        type1, type2 = entity1.entity_type, entity2.entity_type
        
        # 人员-组织关系
        if type1 == 'PERSON' and type2 == 'ORGANIZATION':
            if any(word in between_text for word in ['在', '于', '工作', '任职']):
                return 'WORK_FOR'
        
        # 组织-地点关系
        if type1 == 'ORGANIZATION' and type2 == 'LOCATION':
            if any(word in between_text for word in ['位于', '在', '坐落']):
                return 'LOCATED_IN'
        
        # 概念-概念关系
        if type1 == 'CONCEPT' and type2 == 'CONCEPT':
            if any(word in between_text for word in ['与', '和', '相关', '包含']):
                return 'RELATED_TO'
        
        # 时间-事件关系
        if type1 == 'TIME' or type2 == 'TIME':
            return 'HAPPENED_AT'
        
        # 默认关系
        if any(word in between_text for word in ['的', '与', '和']):
            return 'RELATED_TO'
        
        return None
    
    def _deduplicate_relations(self, relations: List[Relation]) -> List[Relation]:
        """
        关系去重
        
        Args:
            relations: 原始关系列表
            
        Returns:
            去重后的关系列表
        """
        seen = set()
        deduplicated = []
        
        for relation in relations:
            # 使用主语、谓语、宾语作为去重键
            key = (relation.subject.lower(), relation.predicate, relation.object.lower())
            if key not in seen:
                seen.add(key)
                deduplicated.append(relation)
            else:
                # 如果重复，保留置信度更高的
                for i, existing in enumerate(deduplicated):
                    existing_key = (existing.subject.lower(), existing.predicate, existing.object.lower())
                    if existing_key == key:
                        if relation.confidence > existing.confidence:
                            deduplicated[i] = relation
                        break
        
        return deduplicated

class KnowledgeGraphBuilder:
    """知识图谱构建器"""
    
    def __init__(self):
        self.entity_extractor = EntityExtractor()
        self.relation_extractor = RelationExtractor()
        
        # 统计信息
        self.stats = {
            'documents_processed': 0,
            'entities_extracted': 0,
            'relations_extracted': 0,
            'graphs_built': 0
        }
    
    async def build_knowledge_graph(self, text: str, document_id: str = None,
                                  metadata: Dict[str, Any] = None) -> KnowledgeGraph:
        """
        从文本构建知识图谱
        
        Args:
            text: 输入文本
            document_id: 文档ID
            metadata: 元数据
            
        Returns:
            知识图谱
        """
        try:
            # 预处理文本
            processed_text = self._preprocess_text(text)
            
            # 提取实体
            entities = await self.entity_extractor.extract_entities(
                processed_text, context=document_id or ""
            )
            
            # 提取关系
            relations = await self.relation_extractor.extract_relations(
                processed_text, entities, context=document_id or ""
            )
            
            # 构建图谱元数据
            graph_metadata = {
                'document_id': document_id,
                'created_at': datetime.now().isoformat(),
                'text_length': len(text),
                'entity_count': len(entities),
                'relation_count': len(relations),
                'extraction_method': 'rule_based',
                **(metadata or {})
            }
            
            # 创建知识图谱
            knowledge_graph = KnowledgeGraph(
                entities=entities,
                relations=relations,
                metadata=graph_metadata
            )
            
            # 更新统计信息
            self.stats['documents_processed'] += 1
            self.stats['entities_extracted'] += len(entities)
            self.stats['relations_extracted'] += len(relations)
            self.stats['graphs_built'] += 1
            
            logger.debug(f"构建知识图谱完成: 实体={len(entities)}, 关系={len(relations)}")
            
            return knowledge_graph
            
        except Exception as e:
            logger.error(f"构建知识图谱失败: {e}")
            raise
    
    def _preprocess_text(self, text: str) -> str:
        """
        文本预处理
        
        Args:
            text: 原始文本
            
        Returns:
            处理后的文本
        """
        # 基础清理
        text = text.strip()
        
        # 移除多余的空白字符
        text = re.sub(r'\s+', ' ', text)
        
        # 移除特殊字符（保留中文、英文、数字、基本标点）
        text = re.sub(r'[^\u4e00-\u9fff\w\s.,;:!?()\[\]{}"\'-]', '', text)
        
        return text
    
    async def build_incremental_graph(self, texts: List[str], 
                                    document_ids: List[str] = None,
                                    batch_size: int = 10) -> List[KnowledgeGraph]:
        """
        增量构建知识图谱
        
        Args:
            texts: 文本列表
            document_ids: 文档ID列表
            batch_size: 批处理大小
            
        Returns:
            知识图谱列表
        """
        graphs = []
        
        # 确保document_ids长度匹配
        if document_ids is None:
            document_ids = [f"doc_{i}" for i in range(len(texts))]
        elif len(document_ids) != len(texts):
            raise ValueError("document_ids长度必须与texts长度相同")
        
        # 分批处理
        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i+batch_size]
            batch_ids = document_ids[i:i+batch_size]
            
            # 并发处理批次
            batch_tasks = [
                self.build_knowledge_graph(text, doc_id)
                for text, doc_id in zip(batch_texts, batch_ids)
            ]
            
            batch_graphs = await asyncio.gather(*batch_tasks, return_exceptions=True)
            
            # 过滤异常结果
            for graph in batch_graphs:
                if isinstance(graph, KnowledgeGraph):
                    graphs.append(graph)
                else:
                    logger.error(f"批处理中的图谱构建失败: {graph}")
        
        logger.info(f"增量构建完成: 处理{len(texts)}个文档, 成功构建{len(graphs)}个图谱")
        
        return graphs
    
    def merge_graphs(self, graphs: List[KnowledgeGraph]) -> KnowledgeGraph:
        """
        合并多个知识图谱
        
        Args:
            graphs: 知识图谱列表
            
        Returns:
            合并后的知识图谱
        """
        if not graphs:
            return KnowledgeGraph(entities=[], relations=[], metadata={})
        
        all_entities = []
        all_relations = []
        merged_metadata = {
            'merged_at': datetime.now().isoformat(),
            'source_graphs': len(graphs),
            'source_documents': []
        }
        
        # 收集所有实体和关系
        for graph in graphs:
            all_entities.extend(graph.entities)
            all_relations.extend(graph.relations)
            
            # 收集元数据
            if 'document_id' in graph.metadata:
                merged_metadata['source_documents'].append(graph.metadata['document_id'])
        
        # 去重
        deduplicated_entities = self.entity_extractor._deduplicate_entities(all_entities)
        deduplicated_relations = self.relation_extractor._deduplicate_relations(all_relations)
        
        # 更新元数据
        merged_metadata.update({
            'total_entities': len(deduplicated_entities),
            'total_relations': len(deduplicated_relations),
            'entity_types': list(set(e.entity_type for e in deduplicated_entities)),
            'relation_types': list(set(r.predicate for r in deduplicated_relations))
        })
        
        merged_graph = KnowledgeGraph(
            entities=deduplicated_entities,
            relations=deduplicated_relations,
            metadata=merged_metadata
        )
        
        logger.info(f"图谱合并完成: 实体={len(deduplicated_entities)}, 关系={len(deduplicated_relations)}")
        
        return merged_graph
    
    def get_statistics(self) -> Dict[str, Any]:
        """
        获取构建统计信息
        
        Returns:
            统计信息字典
        """
        return {
            **self.stats,
            'avg_entities_per_doc': (
                self.stats['entities_extracted'] / self.stats['documents_processed']
                if self.stats['documents_processed'] > 0 else 0
            ),
            'avg_relations_per_doc': (
                self.stats['relations_extracted'] / self.stats['documents_processed']
                if self.stats['documents_processed'] > 0 else 0
            )
        }
    
    def reset_statistics(self):
        """重置统计信息"""
        self.stats = {
            'documents_processed': 0,
            'entities_extracted': 0,
            'relations_extracted': 0,
            'graphs_built': 0
        }

# 工厂函数
def create_knowledge_graph_builder() -> KnowledgeGraphBuilder:
    """
    创建知识图谱构建器实例
    
    Returns:
        知识图谱构建器
    """
    return KnowledgeGraphBuilder()

# 便捷函数
async def extract_knowledge_from_text(text: str, document_id: str = None) -> KnowledgeGraph:
    """
    从文本中提取知识图谱的便捷函数
    
    Args:
        text: 输入文本
        document_id: 文档ID
        
    Returns:
        知识图谱
    """
    builder = create_knowledge_graph_builder()
    return await builder.build_knowledge_graph(text, document_id)