import sys  # needed only for the recursion-limit tweak below
sys.setrecursionlimit(30000)  # raise the default recursion limit (1000) to 30000 — NOTE(review): presumably for deeply nested HTML parsing; confirm it is actually needed

from bs4 import BeautifulSoup

import urllib.request

# Browser-like HTTP request headers so the target site serves the same
# content it would to a regular Firefox visitor (some sites block or
# alter responses for clients with no/odd headers).
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Cache-Control": "no-store, no-cache, must-revalidate",
    "Connection": "keep-alive",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux aarch64; rv:96.0) Gecko/20100101 Firefox/96.0",
}

# Load the previously saved page dump and parse it.
# NOTE(review): despite the original comment ("save crawled data to a
# file"), this section only READS a cached HTML file; nothing is written.
# The context manager guarantees the file handle is closed even if
# read() raises, replacing the manual open/read/close sequence.
with open('sigerot22.html', 'r', encoding='utf-8') as fileOb:
    s = fileOb.read()

soup = BeautifulSoup(s, 'html.parser')

# Quick sanity check that the expected page was loaded.
print( soup.title )

def dlimg( url, i ):
    """Download the image at *url* and save it as ``<i>.jpg`` in the cwd.

    Parameters:
        url: absolute URL of the image to fetch (module-level ``headers``
             are attached to the request).
        i:   integer sequence number used to build the output filename.
    """
    # The original bound the Request object to a variable named
    # ``response``, which was misleading; ``request`` reflects what it is.
    request = urllib.request.Request(url, headers=headers)
    # Context-manage the HTTP response so the connection is released even
    # if read() fails partway (the original never closed it).
    with urllib.request.urlopen(request) as response:
        data = response.read()
    fn = '%d.jpg' % i
    # 'wb' truncates/creates just like the original 'wb+'; the read
    # capability of '+' was never used.
    with open(fn, 'wb') as fileOb:
        fileOb.write(data)
    print( fn )

# Sequence number for the downloaded image filenames, starting at 1.
fccc = 1

# Ask BeautifulSoup for <img> tags directly instead of iterating every
# tag in the document and filtering by name (the original called
# find_all() with no arguments, visiting the whole tree).
for txid in soup.find_all('img'):
    src = txid.get('src')
    if src is None:
        # An <img> without a src attribute would crash the string
        # concatenation below with a TypeError; skip it instead.
        continue
    url = 'http://faxing.jkb.com.cn' + src
    print( url )
    dlimg( url, fccc )
    fccc += 1

