import requests
from lxml import etree
import os

# Output directory for the scraped news ("校新闻" = "school news").
root_dir = '校新闻'
# makedirs with exist_ok avoids the check-then-create race of isdir()+mkdir().
os.makedirs(root_dir, exist_ok=True)

# Browser-like User-Agent so the news server does not reject the scraper.
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}

# URL scheme of the news listing:
#   first (newest) page: http://news.whpu.edu.cn/xw/xxyw.htm
#   page N:              http://news.whpu.edu.cn/xw/xxyw/<N>.htm
url = r'http://news.whpu.edu.cn/xw/xxyw.htm'
# timeout keeps the script from hanging forever on a stalled connection
html = requests.get(url=url, headers=header, timeout=10)
html.raise_for_status()  # fail fast on HTTP errors instead of parsing an error page
html.encoding = "utf-8"
page_text = html.text
tree = etree.HTML(page_text)
# The pager element's text looks like "current/total"; the total page count
# is the part after the last '/'.
pages_str = tree.xpath("//*[@id=\"fanye153388\"]/text()")[0]
page_nums = pages_str.split('/')[-1]
page_nums = int(page_nums)
#print(page_nums)
##################################
# Older pages are numbered and counted DOWN: page_nums is the oldest page,
# page 1 the newest-but-one, and the newest page has no number at all.
# MIN_PAGE bounds the crawl (range stops before it, exclusive): the original
# code hard-coded 179 as a test limit — set MIN_PAGE = 0 to crawl every page.
MIN_PAGE = 179

all_url_list = []
for i in range(page_nums, MIN_PAGE, -1):
    if i == page_nums:
        # Special case: the highest-numbered page is served as the
        # un-numbered index page.
        url = r'http://news.whpu.edu.cn/xw/xxyw.htm'
    else:
        url = rf'http://news.whpu.edu.cn/xw/xxyw/{i}.htm'

    # timeout keeps a stalled connection from hanging the whole crawl
    html = requests.get(url=url, headers=header, timeout=10)
    html.encoding = "utf-8"
    page_text = html.text
    tree = etree.HTML(page_text)
    urls = tree.xpath("//*[@id=\"content\"]/div[4]/ul//a/@href")

    # Article hrefs are relative ('../..' or '..'); rebase them onto the
    # site root to get absolute URLs.
    for url_one in urls:
        url_one = url_one.replace('../..', 'http://news.whpu.edu.cn')
        url_one = url_one.replace('..', 'http://news.whpu.edu.cn')
        all_url_list.append(url_one)
    print(len(all_url_list))  # progress: running total of collected links

print(all_url_list)
print(len(all_url_list))













