import os
import re
from typing import List, Dict, Any
import logging
import codecs

# Updated import paths (loaders moved to langchain_community)
from langchain_community.document_loaders import (
    TextLoader, 
    Docx2txtLoader,
    UnstructuredMarkdownLoader
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
import yaml
import traceback

# Logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class DocumentProcessor:
    """Load .txt/.docx/.md documents and split them into chunks for indexing.

    Configuration (chunk size/overlap) is read from a YAML file; supported
    file types are mapped to langchain loaders in ``loader_map``.
    """

    def __init__(self, config_path: str = "config/config.yaml"):
        """Read the YAML config and set up the splitter and loader map.

        Args:
            config_path: Path to a YAML file containing
                ``document_processing.chunk_size`` and
                ``document_processing.chunk_overlap``.
        """
        logger.info("\n========== 初始化DocumentProcessor(TXT版本) ==========")

        # Load the configuration with explicit UTF-8 decoding.
        # (codecs.open is a legacy API; the built-in open handles encodings.)
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)

        # Text splitter used by split_documents().
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.config['document_processing']['chunk_size'],
            chunk_overlap=self.config['document_processing']['chunk_overlap']
        )

        # Map of supported (lower-cased) file extensions to loader classes.
        self.loader_map = {
            '.txt': TextLoader,
            '.docx': Docx2txtLoader,
            '.md': UnstructuredMarkdownLoader
        }

        logger.info(f"配置参数: chunk_size={self.config['document_processing']['chunk_size']}, "
                    f"chunk_overlap={self.config['document_processing']['chunk_overlap']}")
        logger.info("========== 初始化完成 ==========\n")

    def load_document(self, file_path: str) -> "List[Document]":
        """Load a single document with the loader matching its extension.

        Args:
            file_path: Path of the file to load.

        Returns:
            The loaded Documents, or an empty list if the loader failed
            (the error is logged, not propagated).

        Raises:
            ValueError: If the file extension is not in ``loader_map``.
        """
        # Lower-case the whole path so extension matching is case-insensitive.
        _, ext = os.path.splitext(file_path.lower())

        logger.info(f"开始处理文档: {file_path} (类型: {ext})")

        if ext not in self.loader_map:
            raise ValueError(f"不支持的文件格式: {ext}")

        try:
            loader_cls = self.loader_map[ext]
            # Plain-text files need an explicit UTF-8 encoding; the other
            # loaders determine encoding themselves.
            if ext == '.txt':
                loader = loader_cls(file_path, encoding='utf-8')
            else:
                loader = loader_cls(file_path)
            documents = loader.load()

            # Log a short content preview of each document for debugging.
            for doc in documents:
                logger.info(f"文档内容预览: {doc.page_content[:200]}...")

            return documents
        except Exception as e:
            # Best-effort: log the failure and return [] so callers can
            # continue with the remaining files.
            logger.error(f"加载文档 {file_path} 时出错: {str(e)}")
            logger.error(traceback.format_exc())
            return []

    def load_documents(self, directory: str) -> "List[Document]":
        """Recursively load every supported document under ``directory``.

        Args:
            directory: Root directory to walk.

        Returns:
            All successfully loaded Documents (possibly empty).
        """
        logger.info(f"\n========== 开始处理目录: {directory} ==========")
        documents: "List[Document]" = []

        # Counters for the summary log line.
        processed_files = 0
        successful_files = 0

        for root, _, files in os.walk(directory):
            for file in files:
                file_path = os.path.join(root, file)
                _, ext = os.path.splitext(file_path.lower())

                if ext in self.loader_map:
                    processed_files += 1
                    # load_document() catches and logs loader errors itself
                    # (returning []), and the unsupported-extension ValueError
                    # cannot occur here because ext was just checked, so no
                    # extra try/except is needed.
                    doc = self.load_document(file_path)
                    if doc:
                        documents.extend(doc)
                        successful_files += 1
                        logger.info(f"成功加载: {file_path}")

        logger.info(f"处理了 {processed_files} 个文件，成功加载了 {successful_files} 个文件")
        logger.info("=========== 目录处理完成 ==========\n")
        return documents

    def split_documents(self, documents: "List[Document]") -> "List[Document]":
        """Split documents into chunks with the configured text splitter.

        Args:
            documents: Documents to split.

        Returns:
            The resulting chunks, or an empty list if splitting failed
            (the error is logged, not propagated).
        """
        logger.info(f"开始将 {len(documents)} 个文档分割成块...")
        try:
            chunks = self.text_splitter.split_documents(documents)
            logger.info(f"分割完成，生成了 {len(chunks)} 个块")

            # Log basic chunk-length statistics.
            if chunks:
                lengths = [len(chunk.page_content) for chunk in chunks]
                avg_length = sum(lengths) / len(lengths)
                logger.info(f"块长度统计: 最小={min(lengths)}, 最大={max(lengths)}, 平均={avg_length:.2f}")

                # Sanity check: report chunks containing mortality-related
                # keywords (diagnostic for this corpus; match is case-sensitive).
                keywords = ["死亡率", "死亡", "mortality", "fatal", "fatality", "death rate"]
                keyword_chunks = []

                for i, chunk in enumerate(chunks):
                    for keyword in keywords:
                        if keyword in chunk.page_content:
                            keyword_chunks.append((i, keyword))
                            logger.info(f"发现包含关键词 '{keyword}' 的块 {i}: {chunk.page_content[:100]}...")
                            break  # record only the first matching keyword per chunk

                if keyword_chunks:
                    logger.info(f"发现包含关键词的块: {len(keyword_chunks)} 个")
                    for idx, (chunk_idx, keyword) in enumerate(keyword_chunks[:5]):
                        logger.info(f"关键词块 {idx+1} (索引 {chunk_idx}): 包含关键词 '{keyword}'")
                else:
                    logger.warning("警告: 未在分块中找到关键词")

            return chunks
        except Exception as e:
            logger.error(f"分割文档时出错: {str(e)}")
            logger.error(traceback.format_exc())
            return []

    def process_directory(self, directory: str) -> "List[Document]":
        """Load every supported document under ``directory`` and split it.

        Args:
            directory: Root directory to process.

        Returns:
            The chunks produced from all loaded documents.
        """
        logger.info(f"\n========== 开始处理目录: {directory} ==========")
        documents = self.load_documents(directory)
        logger.info(f"加载了 {len(documents)} 个文档，准备分块...")

        chunks = self.split_documents(documents)
        logger.info(f"处理了 {len(documents)} 个文档，生成了 {len(chunks)} 个块")

        logger.info(f"========== 目录处理完成 ==========\n")
        return chunks