import sys  # needed only for setrecursionlimit below
# Raise the recursion limit (CPython default is 1000).
# NOTE(review): the original comment said 3000 but the call sets 30000 — confirm intent.
sys.setrecursionlimit(30000)

from bs4 import BeautifulSoup

import urllib.request

# Request headers sent with every image download (see dlimg).
# NOTE(review): three kinds of headers from the original browser capture were
# dropped because they break downloads:
#  - "Host": urllib sets it from the URL; a hard-coded host breaks requests
#    to any other domain,
#  - "If-Modified-Since" / "If-None-Match": conditional requests can return
#    an empty 304 body instead of the image,
#  - "Accept-Encoding: gzip, deflate, br": urllib does not transparently
#    decompress, so advertising these risks writing compressed bytes to .jpg.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
    "Cache-Control": "max-age=0",
    "Connection": "keep-alive",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "cross-site",
    "TE": "trailers",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux aarch64; rv:96.0) Gecko/20100101 Firefox/96.0",
}

# Load the previously saved page and parse it so we can extract <img> tags.
# (The file is a prior crawl of the target page, saved to disk.)
with open('sigerot22.html', 'r', encoding='utf-8') as page_file:
    s = page_file.read()

soup = BeautifulSoup(s, 'html.parser')

# Quick sanity check that the page parsed: show its <title>.
print( soup.title )

def dlimg( url, i ):
    """Download the image at *url* and save it as '<i>.jpg' in the cwd.

    Parameters
    ----------
    url : str
        Absolute image URL (queried with the module-level ``headers``).
    i : int
        Sequence number used to build the output filename.
    """
    request = urllib.request.Request(url, headers=headers)
    # Context manager guarantees the HTTP connection is released.
    with urllib.request.urlopen(request) as response:
        data = response.read()
    fn = '%d.jpg' % i
    # 'wb' is sufficient: we only write, and it creates/truncates the file.
    with open(fn, 'wb') as out_file:
        out_file.write(data)
    print( fn )

# Download every <img> whose src carries a query string; output files are
# numbered sequentially from 1.  The printed URL has the query string
# stripped for readability, but the download uses the full URL.
fccc = 1

# Ask find_all for <img> tags directly instead of scanning every tag
# and filtering by name.
for img_tag in soup.find_all('img'):
    url = img_tag.get('src')
    if url is None:
        # <img> without a src attribute — the original code would crash here.
        continue
    query_pos = url.find('?')
    # '> 0' (not '>= 0') preserves the original behavior of skipping a
    # (degenerate) URL that starts with '?'.
    if query_pos > 0:
        print( url[0:query_pos] )
        dlimg( url, fccc )
        fccc += 1

