import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from docx import Document
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer

def crawl_with_rules(url, rules, timeout=10):
    """Fetch *url* and extract data from it according to *rules*.

    Each rule is a dict with optional keys:
        tag   -- element tag name to search for (default 'a')
        attrs -- attribute filter passed to BeautifulSoup.find_all

    For 'a' tags the href attribute is collected; relative hrefs are
    resolved against *url* so callers always get fetchable absolute URLs
    (urljoin leaves already-absolute URLs unchanged).

    Args:
        url: page to fetch.
        rules: list of rule dicts as described above.
        timeout: seconds before the HTTP request is aborted (default 10);
            without it a stalled server would hang the crawl forever.

    Returns:
        List of extracted strings; empty list if the request fails.
    """
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # raise on 4xx/5xx status codes
    except requests.RequestException as e:
        print(f"请求出错: {e}")
        return []

    soup = BeautifulSoup(response.text, 'html.parser')
    results = []
    for rule in rules:
        tag = rule.get('tag', 'a')  # default tag is 'a'
        attrs = rule.get('attrs', {})
        for element in soup.find_all(tag, attrs=attrs):
            if tag == 'a':
                href = element.get('href')
                if href:
                    # Resolve relative links against the page URL so the
                    # caller can fetch them directly.
                    results.append(urljoin(url, href))
            # Additional per-tag rule handling can be added here.
    return results

def _collect_text(soup, tag, attrs):
    """Concatenate the stripped text of every element matching tag/attrs
    (no separator between elements, matching the original behavior)."""
    return "".join(el.get_text().strip() for el in soup.find_all(tag, attrs=attrs))


def extract_text_with_rules(url, rules, timeout=10):
    """Fetch *url* and return its title text plus tender body text.

    NOTE(review): *rules* is accepted for interface compatibility but is
    currently unused -- extraction is hard-coded to
    <p class="content-title"> (title) and <div class="tender-content">
    (body), separated by a single newline.

    Args:
        url: detail page to fetch.
        rules: unused; kept so existing callers keep working.
        timeout: seconds before the HTTP request is aborted (default 10).

    Returns:
        Extracted text, or "" if the request fails.
    """
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()  # raise on 4xx/5xx status codes
    except requests.RequestException as e:
        print(f"请求出错: {e}")
        return ""

    soup = BeautifulSoup(response.text, 'html.parser')
    title = _collect_text(soup, 'p', {'class': 'content-title'})
    body = _collect_text(soup, 'div', {'class': 'tender-content'})
    return title + "\n" + body

def generate_summary(text, num_sentences=3):
    """Summarize *text* with the LSA algorithm.

    Args:
        text: source text (Chinese tokenization is used).
        num_sentences: maximum number of sentences in the summary.

    Returns:
        The selected sentences joined into one string; "" for blank input
        (LSA on empty text is pointless and can raise inside sumy).
    """
    if not text.strip():
        return ""
    # Parse the plain text with a Chinese tokenizer.
    parser = PlaintextParser.from_string(text, Tokenizer("chinese"))
    # Rank sentences with Latent Semantic Analysis.
    summarizer = LsaSummarizer()
    summary = summarizer(parser.document, num_sentences)
    return "".join(str(sentence) for sentence in summary)

if __name__ == "__main__":
    # Search-results page to crawl (keyword: 招商银行信用卡 tenders).
    target_url = "http://www.gwdt.cn/zb/search.php?kw=%E6%8B%9B%E5%95%86%E9%93%B6%E8%A1%8C%E4%BF%A1%E7%94%A8%E5%8D%A1&areaid=0&type=0"  # 替换为你要爬取的网址
    # Rule: collect every link pointing at a tender detail page.
    link_rules = [
        {
            "tag": "a",
            "attrs": {"href": re.compile(r'www.gwdt.cn/g-zb')}
        }
    ]
    all_links = crawl_with_rules(target_url, link_rules)
    # Text-extraction rules (currently ignored by extract_text_with_rules,
    # which hard-codes its selectors).
    text_rules = [
        {
            "tag": "",
            "attrs": {}
        }
    ]
    # Build one Word document holding every link, its summary, and body text.
    doc = Document()
    for link in all_links:
        print(f"链接: {link}")
        try:
            # Extract the detail page's text content.
            text = extract_text_with_rules(link, text_rules)
            print(f"正文内容: {text[:200]}...")  # preview first 200 chars
            # Generate a short summary of the page.
            summary = generate_summary(text)
            print(f"摘要内容: {summary}")
        except Exception as e:
            # One bad page must not abort the whole crawl and lose the
            # results already collected; log and move on.
            print(f"处理链接出错: {link}: {e}")
            continue
        doc.add_heading(link, level=1)
        doc.add_heading("摘要", level=2)
        doc.add_paragraph(summary)
        doc.add_heading("正文", level=2)
        doc.add_paragraph(text)

    # Persist everything that was collected.
    doc.save('output.docx')