from bs4 import BeautifulSoup
import os
from opencc import OpenCC
import chardet
import re  # regex module, used for separator-line matching

# Shared OpenCC converter, 't2s' = Traditional -> Simplified Chinese.
# Used both for file names and for the extracted text body.
cc = OpenCC('t2s')

def detect_encoding(file_path):
    """Guess the text encoding of *file_path* from its leading bytes.

    Only the first 10,000 bytes are sampled to keep detection fast.
    Falls back to 'GB18030' when chardet cannot make a guess.
    """
    with open(file_path, 'rb') as fh:
        sample = fh.read(10000)
    guess = chardet.detect(sample)['encoding']
    return guess if guess else 'GB18030'

def _extract_text(soup):
    """Pull plain text out of a parsed page.

    Preferred path: if the page contains at least two lines that are
    exactly 80 dashes (optionally padded with whitespace), return the
    text of every tag between the first and second separator.
    Fallback: text of the first <article>/<main>/<body>, or the whole
    document if none is present.
    """
    # Separator lines may carry leading/trailing whitespace.
    separators = soup.find_all(
        string=lambda text: re.match(r'^\s*-{80}\s*$', str(text))
    )

    if len(separators) >= 2:
        parts = []
        current = separators[0].find_next()
        end_tag = separators[1].find_previous()
        while current and current != end_tag:
            if hasattr(current, 'get_text'):
                parts.append(current.get_text(strip=True, separator='\n'))
            current = current.find_next()
        return '\n'.join(parts)

    # Fallback content extraction for pages without the dash separators.
    main_content = soup.find(['article', 'main', 'body']) or soup
    return main_content.get_text(separator='\n', strip=True)


def _convert_one(html_path, txt_path, encoding):
    """Read one HTML file with *encoding*, extract its text, convert it
    to Simplified Chinese and write it to *txt_path* as UTF-8.

    Raises whatever the underlying I/O or parsing raises; the caller
    decides how to handle failures.
    """
    with open(html_path, "r", encoding=encoding, errors='ignore') as f:
        html_content = f.read()

    soup = BeautifulSoup(html_content, 'lxml')
    text = _extract_text(soup)

    with open(txt_path, "w", encoding="utf-8") as f:
        f.write(cc.convert(text))


def html_to_txt(input_dir, output_dir):
    """Convert every .htm/.html file in *input_dir* to a UTF-8 .txt file
    in *output_dir* (created if missing).

    - File names and content are converted Traditional -> Simplified.
    - Encoding is auto-detected; on a UnicodeDecodeError the file is
      retried once with GB18030.
    - Per-file progress and failures are reported on stdout; one bad
      file never aborts the batch.
    """
    os.makedirs(output_dir, exist_ok=True)

    # Collect all HTM/HTML files (case-insensitive extension match).
    html_files = [f for f in os.listdir(input_dir)
                  if f.lower().endswith(('.htm', '.html'))]

    for idx, filename in enumerate(html_files, 1):
        html_path = os.path.join(input_dir, filename)
        txt_filename = f"{cc.convert(os.path.splitext(filename)[0])}.txt"
        txt_path = os.path.join(output_dir, txt_filename)

        try:
            encoding = detect_encoding(html_path)
            _convert_one(html_path, txt_path, encoding)
            print(f"[{idx}/{len(html_files)}] 转换成功：{filename}")

        except UnicodeDecodeError:
            # Detection picked a wrong encoding; retry with GB18030,
            # which covers the full range of Chinese legacy encodings.
            print(f"编码检测失败，尝试备选编码处理：{filename}")
            try:
                _convert_one(html_path, txt_path, "GB18030")
                print(f"[{idx}/{len(html_files)}] 转换成功：{filename}")
            except Exception as e:
                print(f"备选方案处理失败：{filename} - {str(e)}")

        except Exception as e:
            # Report and continue with the next file.
            print(f"处理异常：{filename} - {str(e)}")

if __name__ == "__main__":
    # Batch-convert the fixed source directory into its _txt sibling.
    source = r"D:\daxue"
    target = r"D:\daxue_txt"
    html_to_txt(source, target)