import re
from urllib.parse import urljoin

import requests
from lxml import etree

# Pretend to be a desktop browser so the site serves the normal page.
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"}
# Raw string so backslash sequences in the URL are never interpreted as escapes.
url = r'http://news.whpu.edu.cn/info/1005/17072.htm'


def _fetch_html(page_url):
    """Download *page_url* and return its body decoded as UTF-8 text.

    Raises requests.HTTPError on a non-2xx response so an error page is
    never silently parsed as the article.
    """
    resp = requests.get(url=page_url, headers=header, timeout=10)
    resp.raise_for_status()
    resp.encoding = "utf-8"  # the site serves UTF-8 pages
    return resp.text


def _safe_filename(name):
    """Replace characters that are illegal in file names with '_'."""
    return re.sub(r'[\\/:*?"<>|]', '_', name).strip()


def _download_images(tree):
    """Save every image inside the article body to the current directory.

    Image file names are taken from the last path segment of each src.
    """
    pic_urls = tree.xpath('//*[@id="vsb_content_2"]//img/@src')
    for pic_url in pic_urls:
        # urljoin handles both relative src values and already-absolute ones.
        full_url = urljoin(url, pic_url)
        pic_name = _safe_filename(full_url.split('/')[-1])
        pic_content = requests.get(url=full_url, headers=header, timeout=10).content
        with open(pic_name, 'wb') as fp:
            fp.write(pic_content)


def _save_article_text(tree, title):
    """Write the article's non-empty text fragments, one per line, to '<title>.txt'."""
    texts = tree.xpath('//div[@id="vsb_content_2"]//text()')
    with open(_safe_filename(title) + ".txt", "w", encoding='utf-8') as fp:
        for text in texts:
            text = text.strip()  # drop surrounding whitespace
            if text:  # skip fragments that are only whitespace
                fp.write(text + "\n")


def main():
    """Scrape one news article: save its images and its text content."""
    page_text = _fetch_html(url)
    tree = etree.HTML(page_text)
    # The second <li> of the info form holds the article title.
    title = tree.xpath('//*[@id="content"]/div[2]/form/div/ul/li[2]/text()')[0]
    _download_images(tree)
    _save_article_text(tree, title)


if __name__ == "__main__":
    main()
