#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
@Project ：PythonData 
@File    ：case6_term.py
@Author  ：朱志文
@Date    ：2021/12/29 15:19 
'''
# http://www.hbitu.cn/c_tzgg.php
'''淮北理工学院新闻'''
import requests,bs4
def main():
    """Scrape news titles and hrefs from the three hbitu.cn news sections.

    Crawls pages 1-9 of each section ('c_tzgg' notices, 'c_xxxw' school
    news, 'c_ybxx' general info) and collects every news item's title
    text and link.

    Returns:
        tuple[list[str], list[str]]: parallel lists of titles and hrefs,
        in crawl order.

    Raises:
        requests.HTTPError: if any page responds with a non-2xx status.
    """
    sections = ['c_tzgg', 'c_xxxw', 'c_ybxx']
    # Hoisted out of the loop: identical for every request.
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
    lst_news, lst_href = [], []
    for section in sections:
        for page in range(1, 10):
            resp = requests.get(
                url=f'http://www.hbitu.cn/{section}.php?page={page}',
                headers=headers,
                timeout=10,  # fail fast instead of hanging on a stalled server
            )
            resp.raise_for_status()
            # The site does not declare its charset reliably; trust detection.
            resp.encoding = resp.apparent_encoding
            soup = bs4.BeautifulSoup(resp.text, 'lxml')
            # Titles live in the <a> tags, publish dates in sibling <span>s.
            news_li = soup.select('#newslist > div.nlist > ul > li> a')
            date_span = soup.select('#newslist > div.nlist > ul > li > span')
            for item, _date in zip(news_li, date_span):
                lst_news.append(item.text)
                lst_href.append(item.get('href'))
    return lst_news, lst_href

# Build an HTML table report of all scraped news items.
lst_news, lst_href = main()
print(lst_href)

n = 0
# 'with' guarantees the file is flushed and closed (the original never
# closed it); explicit UTF-8 avoids UnicodeEncodeError on non-CJK locales.
with open('huaibei.html', 'w', encoding='utf-8') as fout:
    fout.write("<html>")  # 设置输出的html文件的格式
    fout.write("<body>")
    fout.write("<table>")
    # One table row per news item: title in the first cell, link in the second.
    for title, href in zip(lst_news, lst_href):
        fout.write("<tr>")
        fout.write("<td>%s</td>" % title)
        fout.write("<td>%s</td>" % href)
        fout.write("</tr>")
        n += 1
    fout.write("</table>")
    fout.write("</body>")
    fout.write("</html>")

print('共发布了%d条信息' % n)

'''淮北理工学院图片'''

import requests,bs4,os
from lxml import etree

def getNetPict(url):
    """Download one image from *url* into C://TEMP//, skipping existing files.

    The local filename is the last path component of the URL, so each
    distinct image is saved exactly once. (The original looped 1..32 and
    wrote the SAME image to 1.jpg..32.jpg, then skipped every later URL
    because those names already existed — only the first image was ever
    kept.)

    Args:
        url: absolute URL of the image to fetch.
    """
    root = 'C://TEMP//'
    path = root + os.path.basename(url)  # unique name per image URL
    try:
        if not os.path.exists(root):  # 判断文件夹是否存在
            os.mkdir(root)  # 创建文件夹
        if not os.path.exists(path):  # 判断文件是否存在
            r = requests.get(url, timeout=10)
            r.raise_for_status()  # don't save an HTML error page as a .jpg
            with open(path, 'wb') as f:  # 'with' closes the file itself
                f.write(r.content)  # 写入文件
            print("文件保存成功")
        else:
            print('文件已存在')
    except (requests.RequestException, OSError) as e:
        # Narrowed from a bare except that swallowed everything (including
        # KeyboardInterrupt) and printed only the truncated message '爬取'.
        print('爬取失败:', e)



# Crawl article pages id=17..43 and download every inline image found.
# Hoisted out of the loop: identical for every request.
IMG_HEADERS = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
for page in range(17, 44):
    resp = requests.get(
        url=f'http://www.hbitu.cn/c_ybxx.php?id={page}',
        headers=IMG_HEADERS,
        timeout=10,  # fail fast instead of hanging on a stalled server
    )
    resp.raise_for_status()
    resp.encoding = resp.apparent_encoding
    tree = etree.HTML(resp.text)
    # 通过XPath语法从页面中提取图片 — <img> tags inside the article body.
    img_tree = tree.xpath('//*[@id="newsshow"]/div[3]/p/img')
    for img in img_tree:
        # src attributes are site-relative; prefix the host to get a full URL.
        href = f'http://www.hbitu.cn/{img.get("src")}'
        getNetPict(href)


