import re

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
import html2text

# Base URL of the NFRA (National Financial Regulatory Administration) site.
base_url = "https://www.nfra.gov.cn/cn/view/pages/"
# Laws and regulations list page
# list_url = "ItemList.html?itemPId=923&itemId=927&itemUrl=ItemListRightList.html&itemName=%E6%B3%95%E5%BE%8B%E6%B3%95%E8%A7%84&itemsubPId=926"
# Policies, rules and normative documents list page
# list_url = "ItemList.html?itemPId=923&itemId=928&itemUrl=ItemListRightList.html&itemName=政策规章规范性文件&itemsubPId=926#"
# Regulatory updates list page
# list_url = "ItemList.html?itemPId=914&itemId=915&itemUrl=ItemListRightList.html&itemName=监管动态#"
# Policy interpretation list page (the one currently scraped)
list_url = "ItemList.html?itemPId=914&itemId=917&itemUrl=ItemListRightList.html&itemName=政策解读&itemsubPId=916"
# NOTE(review): `headers` (and the `requests` import) are currently unused —
# fetching is done through Selenium, which sends its own headers.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Encoding": "gzip, deflate, br"
}


def scrape_page(url):
    """
    Fetch the fully rendered HTML source of a page via headless Chrome.

    Selenium is used (instead of plain requests) because the site renders
    its content client-side with Angular.

    Parameters:
        url: the page URL; None or "" returns "" immediately.

    Returns:
        The rendered page source as a string, or "" for empty input.
    """
    if not url:
        return ""
    # Configure Selenium (a matching ChromeDriver must be installed).
    options = webdriver.ChromeOptions()
    # Headless mode: no visible browser window.
    options.add_argument('--headless')
    driver = webdriver.Chrome(options=options)
    try:
        driver.get(url)
        # page_source reflects the DOM after client-side rendering.
        return driver.page_source
    finally:
        # Always release the browser, even if driver.get() raises —
        # the original leaked a Chrome process on any fetch error.
        driver.quit()


#


def scrape_detail_page_content(url, title):
    """
    Scrape one article detail page and save its body as '<title>.md'.

    Parameters:
        url: absolute URL of the article detail page.
        title: article title; sanitized and used as the output file name.
    """
    page_source = scrape_page(url)

    soup = BeautifulSoup(page_source, 'html.parser')
    # Guard each find(): a failed fetch or a layout change would otherwise
    # raise AttributeError on the chained .find() call.
    container = soup.find('div', class_='container')
    content_tag = container.find('div', class_='wenzhang-content') if container else None
    if content_tag is None:
        print(f"未找到正文内容，跳过：{url}")
        return

    # Convert the article HTML fragment to Markdown.
    h = html2text.HTML2Text()
    markdown_con = h.handle(str(content_tag))

    # Replace characters that are invalid in file names (e.g. '/', ':', '?'),
    # which do occur in article titles.
    safe_title = re.sub(r'[\\/:*?"<>|]', '_', title)

    with open(f'{safe_title}.md', 'w', encoding='utf-8') as f:
        f.write(markdown_con)


def scrape_list_page(first_page=1, last_page=2):
    """
    Scrape the list page(s) and then each linked article detail page.

    Parameters:
        first_page: first page number to fetch, inclusive (default 1).
        last_page: last page number to fetch, inclusive (default 2 — same
            range as the previously hard-coded `range(1, 3)`).
    """
    for page_num in range(first_page, last_page + 1):
        # Pagination is routed through the URL fragment (Angular hash routing),
        # so the fragment must be part of the URL handed to Selenium.
        page_source = scrape_page(f"{base_url}{list_url}#{page_num}")

        soup = BeautifulSoup(page_source, 'html.parser')

        # Guard each find(): a failed fetch or layout change would otherwise
        # raise AttributeError on the chained calls.
        right_div = soup.find("div", class_='caidan-right-div')
        scope = right_div.find('div', class_='ng-scope') if right_div else None
        if scope is None:
            print(f"未找到列表内容，跳过第 {page_num} 页")
            continue

        for item in scope.find_all('div', class_='panel-row ng-scope'):
            a_tag = item.find('a', class_="ng-binding")
            if a_tag is None:
                continue
            # Article title and detail-page link.
            title = a_tag.text.strip()
            link = a_tag['href']
            # Publication date (may be absent on malformed rows).
            date_tag = item.find('span', class_='date')
            date = date_tag.text.strip() if date_tag else ''

            print(f"文章名称：{title}，链接：{link}，发布时间：{date}")

            # Fetch and save each article one by one.
            scrape_detail_page_content(base_url + link, title)


# Script entry point: guard against running the scrape on import,
# so the functions above can be reused as a module.
if __name__ == "__main__":
    scrape_list_page()
