#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
HTML文本提取工具
根据<!DOCTYPE html>分割多个HTML文档，提取纯文本内容并输出到output.txt
"""

from bs4 import BeautifulSoup
import sys
import os
import re

# ==================== Parameter setup ====================
print("HTML文本提取工具启动...")

# Input file: the first CLI argument wins; otherwise fall back to the
# sample HTML dump that lives next to this script.
default_input = os.path.join(os.path.dirname(__file__), "../cqut_content.html")
cli_args = sys.argv[1:]
if cli_args:
    input_file = cli_args[0]
    print(f"从命令行获取输入文件: {input_file}")
else:
    input_file = default_input

# The extracted plain text always goes to a fixed file in the CWD.
output_file = "output.txt"
print(f"输出文件: {output_file}")

# ==================== File reading ====================
print("\n正在读取输入文件...")

# Bail out early with a readable message instead of an open() traceback.
if not os.path.exists(input_file):
    print(f"错误: 文件 {input_file} 不存在")
    sys.exit(1)

try:
    with open(input_file, 'r', encoding='utf-8') as source:
        file_content = source.read()
    print(f"成功读取文件，大小: {len(file_content)} 字符")
except Exception as err:
    # Covers permission problems and UTF-8 decoding errors alike.
    print(f"读取文件失败: {err}")
    sys.exit(1)

# ==================== HTML document splitting ====================
print("\n正在分割HTML文档...")

# The input concatenates several pages, each introduced by its own DOCTYPE.
html_parts = file_content.split('<!DOCTYPE html>')

# str.split always yields at least one element, so html_parts[0] is safe.
# Keep the leading fragment only if it actually contains an <html> tag;
# every later fragment gets the DOCTYPE that split() stripped restored.
html_documents = []
if '<html' in html_parts[0].lower():
    html_documents.append(html_parts[0])
html_documents.extend('<!DOCTYPE html>' + fragment for fragment in html_parts[1:])

print(f"发现 {len(html_documents)} 个HTML文档")

# ==================== Text extraction ====================
print("\n正在提取文本内容...")

# Only lines longer than this many characters survive cleaning.  The old
# inline comment claimed 20 while the code compared against 23; the named
# constant makes the actual threshold explicit (behavior unchanged).
MIN_LINE_LENGTH = 23

# Compiled once here instead of re-built for every document in the loop.
COMMENT_RE = re.compile(r'<!--.*?-->', flags=re.DOTALL)

all_text = []

for i, html_doc in enumerate(html_documents, 1):
    print(f"处理第 {i} 个HTML文档...")

    try:
        # Parse with the stdlib-backed parser (no external lxml dependency).
        soup = BeautifulSoup(html_doc, 'html.parser')

        # Remove tags whose content must never appear in the text output.
        for tag in soup(["script", "style", "noscript", "meta", "link", "head"]):
            tag.decompose()

        # Strip HTML comments: serialize, drop <!-- ... --> spans, re-parse.
        soup = BeautifulSoup(COMMENT_RE.sub('', str(soup)), 'html.parser')

        # Plain text with one extracted fragment per line.
        text_content = soup.get_text(separator='\n', strip=True)

        if text_content:
            # Keep only sufficiently long lines to filter out menus,
            # labels and other navigation noise.
            cleaned_lines = [
                stripped
                for stripped in (line.strip() for line in text_content.split('\n'))
                if len(stripped) > MIN_LINE_LENGTH
            ]
            text_content = '\n'.join(cleaned_lines)

            if text_content:
                all_text.append(f"=== 文档 {i} ===\n{text_content}\n")
                print(f"提取到 {len(text_content)} 个字符")

                # Show up to the first 200 characters as a preview.
                preview = text_content[:200] + "..." if len(text_content) > 200 else text_content
                print(f"预览: {preview}")
            else:
                print(f"文档 {i} 清理后未提取到有效文本")
        else:
            print(f"文档 {i} 未提取到有效文本")

    except Exception as e:
        # Best-effort per document: report the failure and continue with
        # the remaining documents rather than aborting the whole run.
        print(f"处理文档 {i} 时出错: {e}")
        continue

# ==================== Merge content ====================
print("\n合并所有文本内容...")

final_text = '\n'.join(all_text)
print(f"总共提取到 {len(final_text)} 个字符")

# ==================== Write output ====================
print(f"\n正在保存到 {output_file}...")

try:
    with open(output_file, 'w', encoding='utf-8') as sink:
        sink.write(final_text)
    print(f"✅ 成功保存到 {output_file}")
    print(f"文件大小: {len(final_text)} 字符")
except Exception as err:
    # A failed save is fatal: report and exit non-zero.
    print(f"保存文件失败: {err}")
    sys.exit(1)

print("\n🎉 HTML文本提取完成！")
