# !usr/bin/env python
# -*- coding:utf-8 _*-
"""
@Author:张广勤
@Web site: https://www.tunan.wang
@Github:www.github.com
 
@File:gongbao_sx1_0.py
@Time:2024/9/3 7:45

@Motto:不积跬步无以至千里，不积小流无以成江海！
"""
import requests
from bs4 import BeautifulSoup
import re  # 导入正则表达式模块

# 定义URL
# url = 'http://tjj.shanxi.gov.cn/tjsj/tjgb/ndtjgb/index.shtml'
urls = ['http://tjj.shanxi.gov.cn/tjsj/tjgb/ndtjgb/index.shtml','http://tjj.shanxi.gov.cn/tjsj/tjgb/ndtjgb/index_1.shtml']
titles = []
hrefs = []

for url in urls:
    # 发送请求
    response = requests.get(url)

    # 检查请求是否成功
    if response.status_code == 200:
        response.encoding = 'utf-8'  # 设置正确的编码方式
        # 解析HTML
        soup = BeautifulSoup(response.text, 'html.parser')

        # 找到包含公报标题和链接的元素
        # 假设公报标题和链接在<a>标签中，并且这些标签位于某个<div>容器内
        reports_container = soup.find('div', class_='ndtjgb clearfix')  # 根据实际情况调整class名称
        # titles = []
        # hrefs = []
        if reports_container:
            # 查找所有的<a>标签
            links = reports_container.find_all('a')
            # 获取基础URL
            base_url = url.rsplit('/', 1)[0]

            # 打印所有找到的公报标题及其链接地址
            for link in links:
                href = link.get('href')
                if href and './' in href:  # 只处理相对链接
                    # 获取链接地址
                    absolute_href = f"{base_url}/{href.replace('./', '')}"

                    # 提取标题（假设标题与日期之间使用 - 隔开）#空格分隔
                    title_parts = re.split(r' ', link.text.strip())
                    if len(title_parts) > 1:
                        title = title_parts[0].rstrip()  # 取分割后的第一部分作为标题
                    else:
                        title = link.text.strip()  # 如果没有分割，则直接使用全部文本作为标题

                    print(f"Title: {title}")
                    print(f"Link: {absolute_href}\n")
                    titles.append(title)
                    hrefs.append(absolute_href)
        else:
            print("No reports container found.")
    else:
        print("Failed to retrieve the webpage:", response.status_code)

print(titles)
print(hrefs)