from elasticsearch import Elasticsearch, helpers
from bs4 import BeautifulSoup, Comment
import re
import logging
import unicodedata
import html

# Logging setup: timestamped INFO-level messages (module uses the root logger).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger()

# Elasticsearch connection settings.
# NOTE(review): the API key is hard-coded in source — move it to an
# environment variable or secrets store before sharing/committing this file.
ES_HOST = "https://localhost:9200"
ES_API_KEY = "T0lqM2RKWUI4clBKeFVBenFaZGc6MTBWZmU3d3VSX0dHQy1iNjBfbHRyUQ=="
ES_CA_CERTS = "/srv/elasticsearch-8.12.2/config/certs/http_ca.crt"
INDEX_NAME = "file_hash"

# Shared Elasticsearch client: TLS-verified connection (self-signed CA cert)
# with automatic retry on request timeouts.
es = Elasticsearch(
    hosts=[ES_HOST],
    api_key=ES_API_KEY,
    ca_certs=ES_CA_CERTS,
    verify_certs=True,
    retry_on_timeout=True
)

def is_gibberish(text):
    """Return True when *text* contains more than 100 characters from the
    Unicode private-use areas (U+E000–U+F8FF plus the plane-15/16 PUAs),
    which indicates un-decodable/garbled extraction output."""
    if not text:
        return False

    pua_pattern = r'[\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd]'
    # Count matches lazily instead of materializing a list of them.
    pua_count = sum(1 for _ in re.finditer(pua_pattern, text))
    return pua_count > 100

def clean_html_content(raw_content):
    """Extract readable plain text from standard or malformed HTML.

    Removes non-content elements (scripts, styles, forms, ...) and HTML
    comments, inserts newlines around block-level elements so paragraph /
    list / table structure survives extraction, then normalizes whitespace.

    Returns "" for empty or gibberish input. On any parse failure, falls
    back to crudely stripping everything tag-shaped from the raw input.
    """
    if not raw_content or is_gibberish(raw_content):
        return ""

    try:
        # Decode HTML entities (&amp; -> &) so text nodes read naturally.
        decoded = html.unescape(raw_content)
        # Collapse all whitespace (including original newlines) up front:
        # the only newlines in the output are the structural markers we
        # insert below.
        decoded = re.sub(r'\s+', ' ', decoded)
        # BUG FIX: the former "repair unclosed tags" substitution
        # re.sub(r'<([^>]+?)(?<!/)>', r'<\1>', ...) replaced each match
        # with itself — a pure no-op — so it was removed; the lxml parser
        # already repairs malformed markup.

        # lxml tolerates non-standard / broken HTML.
        soup = BeautifulSoup(decoded, 'lxml')

        # Drop elements that never contribute readable body text.
        for element in soup(["script", "style", "head", "meta",
                             "noscript", "link", "iframe", "svg",
                             "form", "button", "input", "select"]):
            element.decompose()

        # Strip HTML comments.
        for comment in soup.find_all(string=lambda text: isinstance(text, Comment)):
            comment.extract()

        # Insert newline markers around block-level elements to preserve
        # document structure in the extracted text.
        for tag in soup.find_all(['p', 'div', 'h1', 'h2', 'h3', 'h4',
                                  'h5', 'h6', 'ul', 'ol', 'li', 'table',
                                  'tr', 'th', 'td', 'br', 'hr']):
            if tag.name in ('br', 'hr'):
                tag.insert_after('\n')
            else:
                tag.insert_before('\n')
                tag.insert_after('\n')

        # BUG FIX: the original used get_text(separator=' ', strip=True),
        # which strips every text node and thereby discards the '\n'
        # markers inserted above, destroying the structure this function
        # tries to preserve. Extract without stripping, then normalize
        # whitespace explicitly.
        text = soup.get_text()
        text = re.sub(r'[ \t]*\n[ \t]*', '\n', text)  # trim spaces hugging newlines
        text = re.sub(r'[ \t]{2,}', ' ', text)        # collapse runs of spaces/tabs
        text = re.sub(r'\n{3,}', '\n\n', text)        # cap consecutive blank lines at one

        return text.strip()

    except Exception:
        # Last-resort, best-effort fallback: remove anything tag-shaped.
        return re.sub(r'<[^>]+>', '', raw_content)

def clean_text_content(raw_content):
    """Clean a document's raw text for indexing.

    HTML-looking input is routed through clean_html_content(); plain text
    is NFKC-normalized. Gibberish-dominated results are discarded, and
    excess spaces / blank lines are collapsed.
    """
    if not raw_content:
        return ""

    # 1. Route by content shape: tag-like input gets the HTML pipeline,
    #    everything else gets Unicode normalization only.
    looks_like_html = '<' in raw_content and '>' in raw_content
    cleaned = (clean_html_content(raw_content)
               if looks_like_html
               else unicodedata.normalize('NFKC', raw_content))

    # 2. Discard documents dominated by private-use-area characters.
    if is_gibberish(cleaned):
        return ""

    # 3/4. Whitespace tidy-up: runs of spaces become one space; three or
    #      more newlines become a single blank line.
    for pattern, replacement in ((r' {2,}', ' '), (r'\n{3,}', '\n\n')):
        cleaned = re.sub(pattern, replacement, cleaned)

    return cleaned.strip()

def process_documents():
    """Scroll through every completed document that has a `content` field,
    clean its text with clean_text_content(), and bulk-update only the
    documents whose content actually changed.

    Uses the ES scroll API with a 10-minute keepalive; update actions are
    flushed in batches of `scroll_size`. Errors are logged (not re-raised),
    and the scroll context is always cleared in the `finally` block.
    """
    query = {
        "bool": {
            "must": [
                {"term": {"status": "completed"}},
                {"exists": {"field": "content"}}
            ]
        }
    }

    scroll_size = 200          # page size per scroll AND bulk flush threshold
    actions = []               # pending bulk update actions
    processed_count = 0
    updated_count = 0
    total_docs = 0
    scroll_id = None
    
    try:
        # Fetch the initial page of matching documents (content field only).
        resp = es.search(
            index=INDEX_NAME,
            scroll='10m',
            size=scroll_size,
            query=query,
            _source=["content"]
        )
        scroll_id = resp['_scroll_id']
        total_docs = resp['hits']['total']['value']
        
        logger.info(f"找到 {total_docs} 个待处理文档")

        while resp['hits']['hits']:
            for hit in resp['hits']['hits']:
                doc_id = hit['_id']
                content = hit['_source'].get("content", "")
                processed_count += 1
                
                # Progress report every 500 documents.
                if processed_count % 500 == 0:
                    logger.info(f"已处理 {processed_count}/{total_docs} 个文档 ({processed_count/total_docs:.1%})")
                
                # Clean the document content.
                cleaned = clean_text_content(content)
                
                # Queue an update only when cleaning changed the content.
                if cleaned != content:
                    actions.append({
                        "_op_type": "update",
                        "_index": INDEX_NAME,
                        "_id": doc_id,
                        "doc": {"content": cleaned}
                    })
                    updated_count += 1
                
                # Flush a full batch of pending updates.
                if len(actions) >= scroll_size:
                    helpers.bulk(es, actions)
                    actions = []
            
            # Fetch the next page of documents.
            resp = es.scroll(scroll_id=scroll_id, scroll='10m')
            scroll_id = resp['_scroll_id']
        
        # Flush any remaining queued updates.
        if actions:
            helpers.bulk(es, actions)
        
        logger.info(f"处理完成! 总计: {processed_count}, 更新: {updated_count}")

    except Exception as e:
        logger.error(f"文档处理出错: {str(e)}", exc_info=True)
    finally:
        # Always release the server-side scroll context.
        if scroll_id:
            try:
                es.clear_scroll(scroll_id=scroll_id)
            except Exception as e:
                logger.warning(f"滚动清理失败: {str(e)}")

if __name__ == "__main__":
    try:
        # Verify the cluster is reachable before doing any work.
        if not es.ping():
            raise ValueError("无法连接到Elasticsearch")
        
        # Verify the target index exists.
        if not es.indices.exists(index=INDEX_NAME):
            logger.error(f"索引 {INDEX_NAME} 不存在")
            exit(1)
        
        logger.info("开始文档清洗处理...")
        process_documents()
        logger.info("处理完成")
    except Exception as e:
        logger.error(f"初始化失败: {str(e)}", exc_info=True)
        exit(1)