# -*- coding: utf-8 -*-
"""
# 使用了 [beautifulsoup4==4.12.2]，遵循其 [MIT] 许可证，原始代码来源：[https://www.crummy.com/software/BeautifulSoup/bs4/]
"""
from bs4 import BeautifulSoup
import os,shutil,re
from pathlib import Path
def extract_url_from_comment(comment):
    """Recover the original page URL from an IE-style "saved from url" comment.

    Browsers' "Save As" feature prepends a comment such as
    ``<!-- saved from url=(0042)http://example.com/page -->`` to saved HTML.

    Returns the URL with surrounding whitespace stripped, or ``None`` when
    no such comment is found in *comment*.
    """
    found = re.search(r'<!-- saved from url=\(\d+\)(.*?) *-->', comment)
    return found.group(1).strip() if found else None
def clean_government_article(text):
    """Strip site-navigation breadcrumbs and page-footer boilerplate from
    extracted article text.

    Breadcrumb patterns remove everything from the start of the text up to
    and including a known navigation trail (e.g. "首页 > 政务要闻 > ...");
    footer patterns remove everything from a known trailing marker (e.g.
    "责任编辑：", "相关附件") to the end of the text.  Patterns are applied
    in a fixed order with DOTALL so they can span line breaks.

    Returns the cleaned text with full-width double-space paragraph markers
    turned into newlines, blank-line runs collapsed, and edges stripped.
    """
    # Leading navigation-trail patterns, in the original application order.
    # NOTE: \u00A0 inside these raw strings is resolved by the re module,
    # matching a literal no-break space in the breadcrumb separators.
    breadcrumb_patterns = (
        r".*?首页\s*\u00A0>\u00A0政务要闻\s*\u00A0>\u00A0首页\s*\u00A0>\u00A0湖南要闻",
        r"（文/.*?）相关附件.*$",
        r".*?首页\s*\u00A0>\u00A0政务要闻\s*\u00A0>\u00A0要闻动态",
        r".*?首页\s*\u00A0>\u00A0政务要闻\s*\u00A0>\u00A0三湘时评",
        r".*?星辰在线\s*?\u00A0?>\u00A0?星辰头条\s*?\u00A0?>\u00A0?星闻",
        r".*?首页\s*?\u00A0?>\u00A0?\s*?省教育厅\s*?\u00A0?>\u00A0?\s*?就业创业\s*?\u00A0>\u00A0?\s*?政策文件",
        r".*?首页\s*?\u00A0?>\u00A0?\s*?信息公开\s*?\u00A0?>\u00A0?\s*?通知公告\s*?\u00A0>\u00A0?\s*?科技厅通知公告",
        r".*?首页\s*?\u00A0?>\u00A0?\s*?市政府\s*?\u00A0?>\u00A0?\s*?要闻动态\s*?\u00A0>\u00A0?\s*?政务动态",
        r".*?首页\s*?\u00A0?>\u00A0?\s*?政策\s*?\u00A0?>\u00A0?\s*?解读",
        r".*?首页\s*?\u00A0?>\u00A0?\s*?国资监管\s*?\u00A0?>\u00A0?\s*?国资国企动态",
        r".*?首页\n?\s*?\u00A0?>\n?\u00A0?\s*?研发成果\n?\s*?\u00A0?>\n?\u00A0?\s*?最新研究成果\n?\s*?\u00A0?>\n?\u00A0?\s*?正文",
    )
    # Trailing boilerplate patterns (attribution lines, print/share widgets,
    # ICP filing numbers, related-attachment sections, ...).
    footer_patterns = (
        r"相关附件.*?$",
        r"信息来源：.*?$",
        r"责任编辑：.*?$",
        r"打印.*?收藏.*?$",
        r"适配版.*?电脑版.*?$",
        r"站点地图.*?$",
        r"协办单位：.*?$",
        r"备案号：.*?$",
        r"湘公网安备.*?$",
        r"网站标识码.*?$",
        r"您访问的链接即将离开.*?$",
        r"举报.*?$",
        r"专题\n智汇潇湘 才聚湖南.*?$",
        r"这些精彩内容不要错过！小布快报.*?$",
        r"相关文档\n扫一扫在手机打开.*?$",
    )
    for pattern in breadcrumb_patterns:
        text = re.sub(pattern, "", text, flags=re.DOTALL)
    for pattern in footer_patterns:
        text = re.sub(pattern, "", text, flags=re.DOTALL)
    # Full-width double spaces mark paragraph starts on these sites.
    text = re.sub(r"　　", r"\n", text)
    # Collapse runs of blank lines into a single blank line.
    return re.sub(r"\n\s*\n", "\n\n", text).strip()
def extract_html_to_md(html_path):
    """Convert one saved HTML page into cleaned plain text for a .md file.

    Reads *html_path* as UTF-8, recovers the page's source URL from the
    browser's "saved from url" comment (if present), removes non-content
    tags, and returns ``"<url>\\n\\n<cleaned text>"``.

    Fixes: when no source-URL comment exists, only the cleaned text is
    returned — previously ``None`` was interpolated, writing the literal
    string "None" as the first line of the output file.  Also drops the
    redundant ``str()`` round-trip of the already-str file contents.

    Args:
        html_path: path (str or Path) to the saved .html/.htm file.

    Returns:
        The cleaned article text, prefixed by its source URL when known.
    """
    with open(html_path, 'r', encoding='utf-8') as f:
        html_content = f.read()
    origin_url = extract_url_from_comment(html_content)
    soup = BeautifulSoup(html_content, 'html.parser')
    # Remove elements that never carry article text before extracting.
    for element in soup(['script', 'style', 'noscript', 'meta', 'link', 'header', 'footer']):
        element.decompose()
    full_text = soup.get_text()
    # Strip each line and drop blank ones to normalize the raw extraction.
    cleaned_text = "\n".join(line.strip() for line in full_text.splitlines() if line.strip())
    cleaned_text = clean_government_article(cleaned_text)
    if origin_url is None:
        return cleaned_text
    return f"{origin_url}\n\n{cleaned_text}"

def get_all_content(fp, src_dir=r"/home/sq/html页面文件夹/"):
    """Convert every saved HTML page under *src_dir* to a .md file in *fp*.

    For each ``*.html`` / ``*.htm`` file: write the extracted text to a
    sibling ``.md`` file, move it into the destination folder *fp*, then
    delete the source HTML and its browser-saved ``<name>_files`` asset
    directory.  If a same-named .md already exists at the destination,
    the freshly written file is discarded instead.

    Fixes: the source directory is now a parameter (defaulting to the
    original hard-coded path, so existing callers are unaffected), the
    bare ``except:`` no longer swallows KeyboardInterrupt/SystemExit,
    and path handling uses pathlib consistently instead of mixing in
    ``os.path.splitext`` string surgery.

    Args:
        fp: destination directory for the generated .md files.
        src_dir: directory scanned for saved HTML pages.
    """
    src = Path(src_dir)
    file_list = list(src.glob("*.html")) + list(src.glob("*.htm"))
    for html_file in file_list:
        # Browser "Save As" drops page assets next to the HTML file.
        assets_dir = html_file.with_name(html_file.stem + "_files")
        md_file = html_file.with_suffix(".md")
        md_content = extract_html_to_md(html_file)
        with open(md_file, 'w', encoding='utf-8') as f:
            f.write(md_content)
        try:
            shutil.move(md_file, fp)
            print(f"成功转换HTML到Markdown! 文件保存至: {fp}")
        except (shutil.Error, OSError):
            # Destination already holds a file with this name: this page
            # was converted before, so discard the duplicate .md.
            os.remove(md_file)
            print(f"已存在记录：{md_file}")
        os.remove(html_file)
        if assets_dir.is_dir():
            shutil.rmtree(assets_dir)

if __name__ == "__main__":
    # Destination folder that collects the generated Markdown reports.
    destination = r"/.../xxx/报道收集文件夹"
    get_all_content(destination)