#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author:张广勤
@Web site: https://www.tunan.wang
@Github:www.github.com
 
@File:gongbao_sx4_0.py
@Time:2024/9/4 16:36

@Motto:不积跬步无以至千里，不积小流无以成江海！
"""

from bs4 import BeautifulSoup
from docx import Document
import requests
import os
import re

def url2docx(url):
    """Fetch a web page, extract the report content, and save it as a .docx file.

    Parameters
    ----------
    url : str
        URL of the page to scrape.

    Returns
    -------
    str or None
        Path of the saved .docx file on success, otherwise ``None``
        (request failed or the content div was not found).
    """
    # Timeout keeps the request from hanging indefinitely on a dead server.
    response = requests.get(url, timeout=30)

    if response.status_code != 200:
        print("Failed to retrieve the webpage:", response.status_code)
        return None

    response.encoding = "utf-8"  # force correct decoding of Chinese text
    soup = BeautifulSoup(response.text, 'html.parser')

    # The report body lives in a div with class 'TRS_PreAppend'
    # (adjust the class name to match the target site's HTML structure).
    reports_div = soup.find('div', class_='TRS_PreAppend')

    # Title lookup is fully guarded: the original code raised AttributeError
    # when the 'detail-title' div was missing from the page.
    title_div = soup.find('div', class_='detail-title')
    title_tag = title_div.find('h1') if title_div else None
    reports_title = title_tag.text.strip().replace('\n', '') if title_tag else 'Untitled'

    if not reports_div:
        print(f"{reports_title}\nNo <div> element found with the specified class.")
        return None

    doc = Document()
    doc.add_heading(reports_title, 0)

    # Collect direct-child paragraphs/headings, skipping anything nested
    # inside a table (table text is deliberately excluded from the report).
    report_paragraphs = reports_div.find_all(
        lambda tag: tag.name in ['p', 'h2', 'h3', 'h4', 'h5', 'h6']
        and tag.find_parent('table') is None,
        recursive=False,
    )
    for paragraph in report_paragraphs:
        text = paragraph.text.strip()
        if text:  # skip empty/whitespace-only paragraphs
            doc.add_paragraph(text)

    output_dir = 'gongbao_quanguo/output_docx'
    os.makedirs(output_dir, exist_ok=True)  # race-free directory creation

    # Strip characters that are illegal in file names — e.g. a '/' in the
    # title would otherwise be treated as a path separator and break save().
    safe_title = re.sub(r'[\\/:*?"<>|]', '_', reports_title)
    out_path = os.path.join(output_dir, safe_title + '.docx')
    doc.save(out_path)
    print(f"{reports_title}\nDocument saved successfully.")
    return out_path

# 调用函数
# url = 'https://www.stats.gov.cn/sj/zxfb/202402/t20240228_1947915.html'  # 注意：这里是一个示例 URL，请替换为实际 URL
# url2docx(url)
