import json
import logging
import os
import re
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings

from app.config import CHROMA_DB_PATH, CHROMA_COLLECTION_NAME, KNOWLEDGE_BASE_FILE
from app.schemas.metadata import DocumentMetadata, KnowledgeSearchResult, KnowledgeProcessingStats


class KnowledgeProcessor:
    """Knowledge-base processor for a Web3 financial/investment analysis corpus.

    Splits a markdown knowledge file by heading hierarchy, chunks each section,
    attaches retrieval metadata, and persists everything into a Chroma vector
    store backed by OpenAI embeddings.
    """

    # Module-named logger so failures are observable instead of silently dropped.
    _logger = logging.getLogger(__name__)

    def __init__(self):
        self.embeddings = OpenAIEmbeddings()
        # Splitter tuned to the knowledge-base layout: prefer breaking on
        # headings and list items so each chunk stays one focused bullet point.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,   # keep each chunk under ~500 chars (matches bullet length)
            chunk_overlap=50,  # small overlap so list items are not cut apart
            length_function=len,
            separators=["\n## ", "\n### ", "\n- ", "\n", "。", "！", "？", ";", " ", ""]
        )
        # Set by process_knowledge_base() / load_existing_vectorstore().
        self.vectorstore = None

    def process_knowledge_base(self) -> bool:
        """Read the knowledge-base file, chunk it and store it in Chroma.

        Returns:
            True when at least one chunk was embedded and persisted;
            False when the file is missing, yields no chunks, or processing fails.
        """
        try:
            if not KNOWLEDGE_BASE_FILE.exists():
                self._logger.warning("Knowledge base file not found: %s", KNOWLEDGE_BASE_FILE)
                return False

            with open(KNOWLEDGE_BASE_FILE, 'r', encoding='utf-8') as f:
                content = f.read()

            # Split on #/##/### headings, preserving the heading structure.
            sections = self._split_by_hierarchy(content)

            all_documents = []
            for section_idx, section in enumerate(sections):
                section_title = section["title"]
                section_content = section["content"]

                if not section_content.strip():
                    continue

                chunks = self.text_splitter.split_text(section_content)
                # Repair "- " bullets that were broken across chunk boundaries.
                chunks = self._fix_broken_list_items(chunks)

                for chunk_idx, chunk in enumerate(chunks):
                    chunk = chunk.strip()
                    if not chunk:
                        continue

                    # Metadata enables precise, filtered retrieval later.
                    metadata_obj = self._extract_metadata(
                        section_title=section_title,
                        section_idx=section_idx,
                        chunk_idx=chunk_idx,
                        chunk=chunk,
                    )
                    all_documents.append(Document(
                        page_content=chunk,
                        # Convert to a Chroma-compatible flat metadata dict.
                        metadata=metadata_obj.to_chroma_metadata(),
                    ))

            # Bail out before touching Chroma so we never persist an empty
            # collection to disk when the source produced no chunks.
            if not all_documents:
                return False

            self.vectorstore = Chroma(
                collection_name=CHROMA_COLLECTION_NAME,
                persist_directory=str(CHROMA_DB_PATH),
                embedding_function=self.embeddings
            )
            self.vectorstore.add_documents(all_documents)
            self.vectorstore.persist()

            self._save_processing_stats(len(sections), len(all_documents))
            return True

        except Exception:
            # Best-effort contract: callers only need a boolean, but log the
            # failure instead of discarding it (was: bare `return False`).
            self._logger.exception("Failed to process knowledge base")
            return False

    def _split_by_hierarchy(self, content: str) -> List[Dict[str, str]]:
        """Split *content* into sections on #/##/### markdown headings.

        Returns:
            A list of dicts with keys "title" (the heading line, hashes
            included), "level" (1-3) and "content" (body lines joined by \\n,
            blank lines dropped).
        """
        sections = []
        current_level = 0
        current_title = ""
        current_content: List[str] = []

        for raw_line in content.split('\n'):
            line = raw_line.strip()
            if not line:
                continue

            # Detect heading level; test the deepest prefix first because
            # "### " also starts with "## ".
            if line.startswith('### '):
                level, title = 3, line
            elif line.startswith('## '):
                level, title = 2, line
            elif line.startswith('# '):
                level, title = 1, line
            else:
                # Ordinary body line: belongs to the current section.
                current_content.append(line)
                continue

            # A new heading closes out the previous section.
            if current_title:
                sections.append({
                    "title": current_title,
                    "level": current_level,
                    "content": '\n'.join(current_content).strip()
                })

            current_title = title
            current_level = level
            current_content = []

        # Flush the trailing section (skipped when it has no body lines).
        if current_title and current_content:
            sections.append({
                "title": current_title,
                "level": current_level,
                "content": '\n'.join(current_content).strip()
            })

        return sections

    def _fix_broken_list_items(self, chunks: List[str]) -> List[str]:
        """Merge chunks where a "- " bullet was split mid-item.

        A chunk that does not open a new bullet is glued onto its predecessor
        when that predecessor ends with a dangling '-'.
        """
        if not chunks:
            return []

        merged = [chunks[0]]
        for chunk in chunks[1:]:
            if not chunk.startswith('- ') and merged[-1].endswith('-'):
                merged[-1] += chunk
            else:
                merged.append(chunk)
        return merged

    def _extract_metadata(self, section_title: str, section_idx: int,
                          chunk_idx: int, chunk: str = "") -> "DocumentMetadata":
        """Build retrieval metadata for one chunk from its section title.

        Args:
            section_title: Raw heading line (leading hashes included).
            section_idx: Index of the section within the knowledge base.
            chunk_idx: Index of the chunk within its section.
            chunk: The chunk text; used to record the true chunk length.
                   Defaults to "" for backward compatibility, in which case
                   the legacy title-length value is kept.
        """
        # Strip heading hashes and surrounding whitespace.
        clean_title = section_title.strip('# ').strip()

        # Core category: text before the first full-width colon, first word.
        category = clean_title.split('：')[0].split(' ')[0] if '：' in clean_title else clean_title

        # Keyword extraction from the title text:
        keywords = []
        # token symbols — runs of 2-6 capital letters (e.g. BTC, USDT)
        keywords.extend(re.findall(r'\b[A-Z]{2,6}\b', clean_title))
        # project names — capitalised words (e.g. Uniswap)
        keywords.extend(re.findall(r'\b[A-Z][a-z]+\b', clean_title))
        keywords = list(set(keywords))  # dedupe; order is not significant

        return DocumentMetadata(
            section_id=section_idx,
            chunk_id=chunk_idx,
            source=str(KNOWLEDGE_BASE_FILE),
            title=section_title,
            category=category,
            keywords=keywords,
            # BUGFIX: was len(section_title) — record the actual chunk length.
            chunk_length=len(chunk) if chunk else len(section_title),
        )

    def _save_processing_stats(self, sections_count: int, documents_count: int):
        """Persist processing statistics as JSON next to the Chroma database."""
        stats = KnowledgeProcessingStats(
            sections_count=sections_count,
            documents_count=documents_count,
            # BUGFIX: was str(Path().cwd()) — the working directory, not a time.
            processing_time=datetime.now(timezone.utc).isoformat(),
            vector_db_path=str(CHROMA_DB_PATH),
            knowledge_type="Web3金融投资分析知识库"
        )

        CHROMA_DB_PATH.mkdir(parents=True, exist_ok=True)
        stats_file = CHROMA_DB_PATH / "processing_stats.json"
        with open(stats_file, 'w', encoding='utf-8') as f:
            json.dump(stats.dict(), f, ensure_ascii=False, indent=2)

    def load_existing_vectorstore(self) -> bool:
        """Attach to an already-persisted Chroma store.

        Returns:
            True only when the store directory exists and holds documents.
        """
        try:
            if not CHROMA_DB_PATH.exists():
                return False

            self.vectorstore = Chroma(
                collection_name=CHROMA_COLLECTION_NAME,
                persist_directory=str(CHROMA_DB_PATH),
                embedding_function=self.embeddings
            )

            # NOTE(review): reaches into the private `_collection` attribute
            # to count entries — confirm this survives langchain upgrades.
            return self.vectorstore._collection.count() > 0

        except Exception:
            self._logger.exception("Failed to load existing vector store")
            return False

    def search_similar_documents(self, query: str, k: int = 5,
                                 filter_by: Optional[Dict[str, Any]] = None) -> List["KnowledgeSearchResult"]:
        """Similarity search with optional metadata filtering.

        Args:
            query: Natural-language query text.
            k: Maximum number of results.
            filter_by: Chroma metadata filter, e.g. {"category": "代币-项目-机构关联关系库"}.

        Returns:
            Scored results; empty list when no store is loaded or search fails.
        """
        if not self.vectorstore:
            return []

        try:
            docs = self.vectorstore.similarity_search_with_score(
                query=query,
                k=k,
                filter=filter_by
            )
            # NOTE(review): assumes stored metadata round-trips through
            # DocumentMetadata(**...) — verify to_chroma_metadata() is lossless.
            return [
                KnowledgeSearchResult(
                    content=doc.page_content,
                    metadata=DocumentMetadata(**doc.metadata),
                    similarity_score=float(score),
                )
                for doc, score in docs
            ]

        except Exception:
            self._logger.exception("Similarity search failed for query: %s", query)
            return []

    def get_retriever(self, search_kwargs: Optional[Dict[str, Any]] = None):
        """Return a LangChain retriever over the store, or None if unloaded.

        Args:
            search_kwargs: Passed through to `as_retriever`; defaults to {"k": 5}.
        """
        if not self.vectorstore:
            return None

        if search_kwargs is None:
            search_kwargs = {"k": 5}

        return self.vectorstore.as_retriever(search_kwargs=search_kwargs)