#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Knowledge base construction module.

Builds a structured knowledge base (entities, relations and text chunks)
from a PDF document.
"""

import os
import json
import re
import pdfplumber
from typing import List, Dict, Any, Tuple
from dataclasses import dataclass, asdict
from datetime import datetime
import logging

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class KnowledgeEntity:
    """A named entity extracted from the source document."""
    id: str  # unique identifier, e.g. "entity_1"
    name: str  # surface form as found in the text
    type: str  # concept, event, person, organization, etc.
    description: str  # context snippet surrounding the first occurrence
    keywords: List[str]  # associated keywords (the extractor stores [name])
    relations: List[Dict[str, str]]  # relation links (left empty by the extractor)
    source: str  # source file path
    confidence: float  # extraction confidence (0.8 for concept lists, 0.6 for regex hits)

@dataclass
class KnowledgeRelation:
    """A subject-predicate-object triple extracted from the text."""
    id: str  # unique identifier, e.g. "relation_1"
    subject: str  # subject entity name
    predicate: str  # relation verb, e.g. "导致" / "影响"
    object: str  # object entity name
    description: str  # context snippet around the matched phrase
    source: str  # source file path
    confidence: float  # extraction confidence (0.7 for pattern matches)

@dataclass
class KnowledgeChunk:
    """A chunk of source text enriched with keywords, entities and relations."""
    id: str  # unique identifier, e.g. "chunk_1"
    title: str  # generated from the chunk's first sentence
    content: str  # raw chunk text
    keywords: List[str]  # extracted keywords (at most 10)
    entities: List[str]  # ids of entities whose names occur in the chunk
    relations: List[str]  # ids of relations touching the chunk
    source: str  # source file path
    page_number: int  # best-effort page number detected from the text
    chunk_index: int  # 1-based position of the chunk in the document

class KnowledgeBaseBuilder:
    """Builds a structured knowledge base (entities, relations, chunks) from a PDF."""

    def __init__(self, pdf_path: str = "论人与自然的相处之道.pdf"):
        """
        Initialize the knowledge base builder.

        Args:
            pdf_path: Path to the source PDF file.
        """
        self.pdf_path = pdf_path
        # In-memory store; populated by build_knowledge_base().
        self.knowledge_base: Dict[str, Any] = {
            "entities": {},
            "relations": {},
            "chunks": {},
            "metadata": {
                "source_file": pdf_path,
                "created_at": datetime.now().isoformat(),
                "version": "1.0.0"
            }
        }

        # Predefined concept types: each keyword is looked up verbatim in the text.
        self.concept_types = {
            "历史时期": ["原始社会", "农业社会", "工业社会", "现代社会"],
            "生态概念": ["生态危机", "生态平衡", "可持续发展", "环境保护"],
            "关系类型": ["人与自然", "和谐共生", "征服自然", "敬畏自然"],
            "解决方案": ["绿色技术", "法律法规", "生态价值观", "公众参与"]
        }

        # Relation predicates. extract_relations() builds its regex patterns
        # from this single list, so the two cannot drift apart.
        self.relation_types = [
            "导致", "影响", "包含", "属于", "发展", "演变", "解决", "实现"
        ]

    def extract_text_from_pdf(self) -> str:
        """
        Extract the full text of the PDF.

        Returns:
            str: Concatenated text of all pages (stripped).

        Raises:
            FileNotFoundError: If the PDF file does not exist.
        """
        logger.info(f"开始提取PDF文本: {self.pdf_path}")

        if not os.path.exists(self.pdf_path):
            raise FileNotFoundError(f"PDF文件不存在: {self.pdf_path}")

        text_content = ""
        with pdfplumber.open(self.pdf_path) as pdf:
            logger.info(f"PDF总页数: {len(pdf.pages)}")

            for i, page in enumerate(pdf.pages):
                page_text = page.extract_text()
                # extract_text() may return None for image-only pages.
                if page_text:
                    text_content += page_text + "\n"
                    logger.info(f"已处理第{i+1}页，字符数: {len(page_text)}")

        logger.info(f"PDF文本提取完成，总字符数: {len(text_content)}")
        return text_content.strip()

    def split_into_chunks(self, text: str, chunk_size: int = 300) -> List[Tuple[str, int]]:
        """
        Split text into chunks of roughly ``chunk_size`` characters.

        Paragraphs (separated by blank lines) are accumulated until the size
        limit would be exceeded; a paragraph is never split in the middle.

        Args:
            text: Raw document text.
            chunk_size: Soft upper bound on chunk length in characters.

        Returns:
            List[Tuple[str, int]]: (chunk text, detected page number) pairs.
        """
        logger.info("开始文本分块...")

        paragraphs = text.split('\n\n')
        chunks: List[Tuple[str, int]] = []

        current_chunk = ""
        current_page = 1

        for paragraph in paragraphs:
            paragraph = paragraph.strip()
            if not paragraph:
                continue

            # Best-effort page-number detection (patterns like "第12页").
            # Neither re.search nor int() on a \d+ match can raise here, so
            # no exception handling is needed.
            if "页" in paragraph and any(char.isdigit() for char in paragraph):
                page_match = re.search(r'第?(\d+)页', paragraph)
                if page_match:
                    current_page = int(page_match.group(1))

            # Flush the current chunk when adding this paragraph would
            # exceed the size limit.
            if len(current_chunk) + len(paragraph) > chunk_size and current_chunk:
                chunks.append((current_chunk.strip(), current_page))
                current_chunk = paragraph
            else:
                current_chunk += "\n" + paragraph if current_chunk else paragraph

        # Flush the trailing chunk, if any.
        if current_chunk:
            chunks.append((current_chunk.strip(), current_page))

        logger.info(f"文本分块完成，共生成 {len(chunks)} 个知识块")
        return chunks

    def extract_entities(self, text: str) -> List[KnowledgeEntity]:
        """
        Extract knowledge entities from text.

        Entities come from two sources: the predefined concept-type keyword
        lists (confidence 0.8) and a set of regex patterns (confidence 0.6).
        Each surface form yields at most one entity.

        Args:
            text: Text to scan.

        Returns:
            List[KnowledgeEntity]: Extracted entities, de-duplicated by name.
        """
        logger.info("开始提取知识实体...")

        entities: List[KnowledgeEntity] = []
        seen_names = set()  # avoid duplicate entities for the same surface form
        entity_id = 1

        # Concept entities from the predefined keyword lists.
        for concept_type, keywords in self.concept_types.items():
            for keyword in keywords:
                if keyword in text and keyword not in seen_names:
                    seen_names.add(keyword)
                    context = self._extract_context(text, keyword, 100)
                    entities.append(KnowledgeEntity(
                        id=f"entity_{entity_id}",
                        name=keyword,
                        type=concept_type,
                        description=context,
                        keywords=[keyword],
                        relations=[],
                        source=self.pdf_path,
                        confidence=0.8
                    ))
                    entity_id += 1

        # Additional entities via regular-expression patterns.
        patterns = {
            "时间": r'\d{4}年|\d+世纪|古代|现代|近代|当代',
            "地点": r'[^，。！？\n]{2,10}(?:地区|城市|国家|世界)',
            "组织": r'[^，。！？\n]{2,10}(?:政府|组织|机构|部门)',
            "技术": r'[^，。！？\n]{2,10}(?:技术|方法|手段|措施)'
        }

        for entity_type, pattern in patterns.items():
            # dict.fromkeys de-duplicates repeated matches, preserving order.
            for match in dict.fromkeys(re.findall(pattern, text)):
                if len(match) > 2 and match not in seen_names:  # skip very short matches
                    seen_names.add(match)
                    context = self._extract_context(text, match, 50)
                    entities.append(KnowledgeEntity(
                        id=f"entity_{entity_id}",
                        name=match,
                        type=entity_type,
                        description=context,
                        keywords=[match],
                        relations=[],
                        source=self.pdf_path,
                        confidence=0.6
                    ))
                    entity_id += 1

        logger.info(f"实体提取完成，共提取 {len(entities)} 个实体")
        return entities

    def extract_relations(self, text: str, entities: List[KnowledgeEntity]) -> List[KnowledgeRelation]:
        """
        Extract subject-predicate-object relations between known entities.

        Args:
            text: Text to scan.
            entities: Entities whose names are valid subjects/objects.

        Returns:
            List[KnowledgeRelation]: Extracted relations (unique triples).
        """
        logger.info("开始提取知识关系...")

        relations: List[KnowledgeRelation] = []
        relation_id = 1
        entity_names = {entity.name for entity in entities}
        seen_triples = set()  # emit each (subject, predicate, object) once

        for predicate in self.relation_types:
            # Both captures are bounded to a single clause (no sentence
            # punctuation). The object capture is greedy: the original lazy
            # trailing (.+?) always matched exactly one character, which
            # could never equal a multi-character entity name, so no
            # relation was ever extracted.
            pattern = re.compile(
                r'([^，。！？\n]+?)' + re.escape(predicate) + r'([^，。！？\n]+)'
            )
            for match in pattern.finditer(text):
                subject = match.group(1).strip()
                obj = match.group(2).strip()
                triple = (subject, predicate, obj)

                # Keep only relations whose endpoints are known entities.
                if (subject in entity_names and obj in entity_names
                        and triple not in seen_triples):
                    seen_triples.add(triple)
                    context = self._extract_context(text, match.group(0), 100)
                    relations.append(KnowledgeRelation(
                        id=f"relation_{relation_id}",
                        subject=subject,
                        predicate=predicate,
                        object=obj,
                        description=context,
                        source=self.pdf_path,
                        confidence=0.7
                    ))
                    relation_id += 1

        logger.info(f"关系提取完成，共提取 {len(relations)} 个关系")
        return relations

    def _extract_context(self, text: str, keyword: str, context_length: int) -> str:
        """
        Return the text surrounding the first occurrence of ``keyword``.

        Args:
            text: Full text to search.
            keyword: Substring to locate.
            context_length: Number of characters to keep on each side.

        Returns:
            str: Stripped context window, or "" if the keyword is absent.
        """
        index = text.find(keyword)
        if index == -1:
            return ""

        start = max(0, index - context_length)
        end = min(len(text), index + len(keyword) + context_length)

        return text[start:end].strip()

    def build_knowledge_chunks(self, text_chunks: List[Tuple[str, int]],
                             entities: List[KnowledgeEntity],
                             relations: List[KnowledgeRelation]) -> List[KnowledgeChunk]:
        """
        Assemble knowledge chunks linking text to entities and relations.

        Args:
            text_chunks: (chunk text, page number) pairs from split_into_chunks().
            entities: Extracted entities.
            relations: Extracted relations.

        Returns:
            List[KnowledgeChunk]: One knowledge chunk per text chunk.
        """
        logger.info("开始构建知识块...")

        knowledge_chunks: List[KnowledgeChunk] = []

        for i, (chunk_text, page_num) in enumerate(text_chunks):
            keywords = self._extract_keywords(chunk_text)

            # Entities whose surface form occurs in this chunk.
            related_entities = [e.id for e in entities if e.name in chunk_text]

            # Relations whose subject, object or predicate occurs in this chunk.
            related_relations = [
                r.id for r in relations
                if (r.subject in chunk_text
                    or r.object in chunk_text
                    or r.predicate in chunk_text)
            ]

            knowledge_chunks.append(KnowledgeChunk(
                id=f"chunk_{i+1}",
                title=self._generate_title(chunk_text),
                content=chunk_text,
                keywords=keywords,
                entities=related_entities,
                relations=related_relations,
                source=self.pdf_path,
                page_number=page_num,
                chunk_index=i+1
            ))

        logger.info(f"知识块构建完成，共生成 {len(knowledge_chunks)} 个知识块")
        return knowledge_chunks

    def _extract_keywords(self, text: str) -> List[str]:
        """
        Extract up to 10 keywords: predefined important terms first,
        then up to 5 high-frequency CJK words (frequency >= 2).
        """
        keywords: List[str] = []

        # Predefined important vocabulary, checked verbatim.
        important_words = [
            "人与自然", "生态危机", "可持续发展", "环境保护", "工业革命",
            "生态平衡", "和谐共生", "绿色技术", "法律法规", "生态价值观"
        ]

        for word in important_words:
            if word in text:
                keywords.append(word)

        # Frequency count over runs of 2+ CJK characters.
        words = re.findall(r'[\u4e00-\u9fff]{2,}', text)
        word_freq: Dict[str, int] = {}
        for word in words:
            word_freq[word] = word_freq.get(word, 0) + 1

        # Add the top-5 most frequent words (must occur at least twice).
        for word, freq in sorted(word_freq.items(), key=lambda x: x[1], reverse=True)[:5]:
            if word not in keywords and freq >= 2:
                keywords.append(word)

        return keywords[:10]  # cap keyword count

    def _generate_title(self, text: str) -> str:
        """Generate a title from the first sentence, truncated to 50 chars."""
        sentences = re.split(r'[。！？]', text)
        first_sentence = sentences[0].strip() if sentences else ""
        # Fall back to the placeholder title when the text yields no sentence
        # (the original returned "" for empty input).
        if not first_sentence:
            return "未知标题"
        if len(first_sentence) > 50:
            return first_sentence[:50] + "..."
        return first_sentence

    def build_knowledge_base(self) -> Dict[str, Any]:
        """
        Run the full pipeline: extract text, chunk it, extract entities
        and relations, and assemble the knowledge base.

        Returns:
            Dict[str, Any]: The populated knowledge base.

        Raises:
            Exception: Re-raises any failure after logging it.
        """
        logger.info("=" * 60)
        logger.info("开始构建知识库")
        logger.info("=" * 60)

        try:
            # Step 1: extract PDF text.
            text = self.extract_text_from_pdf()

            # Step 2: split into chunks.
            text_chunks = self.split_into_chunks(text)

            # Step 3: extract entities.
            entities = self.extract_entities(text)

            # Step 4: extract relations.
            relations = self.extract_relations(text, entities)

            # Step 5: build knowledge chunks.
            knowledge_chunks = self.build_knowledge_chunks(text_chunks, entities, relations)

            # Step 6: assemble the knowledge base, keyed by id.
            self.knowledge_base["entities"] = {entity.id: asdict(entity) for entity in entities}
            self.knowledge_base["relations"] = {relation.id: asdict(relation) for relation in relations}
            self.knowledge_base["chunks"] = {chunk.id: asdict(chunk) for chunk in knowledge_chunks}

            # Update metadata with totals.
            self.knowledge_base["metadata"].update({
                "total_entities": len(entities),
                "total_relations": len(relations),
                "total_chunks": len(knowledge_chunks),
                "total_characters": len(text)
            })

            logger.info("=" * 60)
            logger.info("知识库构建完成!")
            logger.info(f"实体数量: {len(entities)}")
            logger.info(f"关系数量: {len(relations)}")
            logger.info(f"知识块数量: {len(knowledge_chunks)}")
            logger.info("=" * 60)

            return self.knowledge_base

        except Exception as e:
            logger.error(f"知识库构建失败: {e}")
            raise

    def save_knowledge_base(self, output_path: str = "knowledge_base.json") -> str:
        """
        Save the knowledge base as pretty-printed UTF-8 JSON.

        Args:
            output_path: Destination file path.

        Returns:
            str: The path written to.

        Raises:
            Exception: Re-raises any I/O failure after logging it.
        """
        try:
            with open(output_path, 'w', encoding='utf-8') as f:
                json.dump(self.knowledge_base, f, ensure_ascii=False, indent=2)

            logger.info(f"知识库已保存到: {output_path}")
            return output_path

        except Exception as e:
            logger.error(f"保存知识库失败: {e}")
            raise

    def load_knowledge_base(self, input_path: str = "knowledge_base.json") -> Dict[str, Any]:
        """
        Load a previously saved knowledge base from JSON.

        Args:
            input_path: Source file path.

        Returns:
            Dict[str, Any]: The loaded knowledge base (also stored on self).

        Raises:
            Exception: Re-raises any I/O or parse failure after logging it.
        """
        try:
            with open(input_path, 'r', encoding='utf-8') as f:
                self.knowledge_base = json.load(f)

            logger.info(f"知识库已从 {input_path} 加载")
            return self.knowledge_base

        except Exception as e:
            logger.error(f"加载知识库失败: {e}")
            raise

    def get_knowledge_summary(self) -> Dict[str, Any]:
        """
        Summarize the knowledge base: metadata, type histograms and
        the first 10 entity/relation ids.

        Returns:
            Dict[str, Any]: Summary dict, or an error dict when empty.
        """
        # NOTE(review): the dict created in __init__ is always truthy, so this
        # only triggers if self.knowledge_base was explicitly replaced/cleared.
        if not self.knowledge_base:
            return {"error": "知识库未构建"}

        # Histogram of entity types.
        entity_types: Dict[str, int] = {}
        for entity in self.knowledge_base["entities"].values():
            entity_type = entity["type"]
            entity_types[entity_type] = entity_types.get(entity_type, 0) + 1

        # Histogram of relation predicates.
        relation_types: Dict[str, int] = {}
        for relation in self.knowledge_base["relations"].values():
            predicate = relation["predicate"]
            relation_types[predicate] = relation_types.get(predicate, 0) + 1

        return {
            "metadata": self.knowledge_base["metadata"],
            "entity_types": entity_types,
            "relation_types": relation_types,
            "top_entities": list(self.knowledge_base["entities"].keys())[:10],
            "top_relations": list(self.knowledge_base["relations"].keys())[:10]
        }


def main():
    """Entry point: build the knowledge base, persist it, and print a summary."""
    builder = KnowledgeBaseBuilder()

    # Build and save; the return values are kept for symmetry with the API.
    knowledge_base = builder.build_knowledge_base()
    output_path = builder.save_knowledge_base()

    # Print a human-readable summary.
    summary = builder.get_knowledge_summary()
    meta = summary['metadata']
    print("\n知识库摘要:")
    print(f"总实体数: {meta['total_entities']}")
    print(f"总关系数: {meta['total_relations']}")
    print(f"总知识块数: {meta['total_chunks']}")
    print(f"总字符数: {meta['total_characters']}")

    print("\n实体类型分布:")
    for type_name, count in summary['entity_types'].items():
        print(f"  {type_name}: {count}")

    print("\n关系类型分布:")
    for type_name, count in summary['relation_types'].items():
        print(f"  {type_name}: {count}")


# Run the full build pipeline when executed as a script.
if __name__ == "__main__":
    main()
