import requests
from bs4 import BeautifulSoup
from lxml import etree

# Request headers that mimic a real Edge-on-Windows browser session so the
# paper.people.com.cn server treats these requests as ordinary page views.
headers = {
    "Referer": "https://paper.people.com.cn/rmrb/pc/layout/202506/23/node_03.html",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36 Edg/137.0.0.0",
    "sec-ch-ua": "\"Microsoft Edge\";v=\"137\", \"Chromium\";v=\"137\", \"Not/A)Brand\";v=\"24\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\""
}
# Running counter of articles printed across all layout pages.
count = 0
# for page in range(1, 21):
#     url = f"https://paper.people.com.cn/rmrb/pc/layout/202506/23/node_0{page}.html"
#     if page >= 10:
#         url = f"https://paper.people.com.cn/rmrb/pc/layout/202506/23/node_{page}.html"
#     response = requests.get(url, headers=headers)
#
#     html_str = response.content.decode()
#     root = BeautifulSoup(html_str, "lxml")
#     journalism_list = root.select("div.news ul.news-list li")
#     # print(journalism_list)
#     for journalism in journalism_list:
#         count += 1
#         news_title = journalism.select_one("a").text.strip()
#         news_url = "https://paper.people.com.cn/rmrb/pc/" + journalism.select_one("a")["href"].strip("../")
#         print(count, news_title, news_url)

# Scrape the article list from layout pages 01-20 of the 2025-06-23 edition.
# For each <li> under div.news > ul, print a running index, the article title,
# and its absolute URL.
for page in range(1, 21):
    # Page numbers in the site's URLs are zero-padded to two digits
    # (node_01 ... node_20), so a single format spec replaces the old
    # duplicated if/else URL construction.
    url = f"https://paper.people.com.cn/rmrb/pc/layout/202506/23/node_{page:02d}.html"
    response = requests.get(url, headers=headers)
    # Fail loudly on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    html_str = response.content.decode("utf-8")
    root = etree.HTML(html_str)
    news_list = root.xpath('//div[@class="news"]/ul/li')
    for news in news_list:
        count += 1
        news_title = "".join(news.xpath('.//a/text()')).strip()
        href = "".join(news.xpath('.//a/@href'))
        # NOTE: the previous code used href.strip("../"), which strips the
        # CHARACTER SET {'.', '/'} from both ends and can corrupt hrefs whose
        # path segments begin or end with those characters. Remove only the
        # literal leading "../" components instead.
        while href.startswith("../"):
            href = href[3:]
        news_url = "https://paper.people.com.cn/rmrb/pc/" + href
        print(count, news_title, news_url)
