# 美食杰 (meishij.net recipe scraper — kept for reference, disabled)
# import requests
# from bs4 import BeautifulSoup
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'  #伪装成浏览器
# }
# url = 'https://www.meishij.net/fenlei/xiafancai/'
# r = requests.get(url, headers=headers)
# r.encoding = "UTF-8"
# soup = BeautifulSoup(r.text, 'html.parser')
#
# td = soup.find_all('a',class_="list_s2_item_info")
# for i in td:
#     name = i.strong.string
#     ingredient = i.span.string
#     print({'name':name,'ingredient':ingredient})


# 拓展迷 (extfans.com browser-extension listing scraper — kept for reference, disabled)
# import requests
# from bs4 import BeautifulSoup
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'  #伪装成浏览器
# }
# url = 'https://www.extfans.com/'
# r = requests.get(url, headers=headers)
# r.encoding = "UTF-8"
# soup = BeautifulSoup(r.text, 'html.parser')
#
# td = soup.find_all('div',class_='p-3 app-item')
# zi = {}
# for i in td:
#     name = i.div.a.get('title')
#     href = i.div.a.get('href')
#     brief = i.find_all('div',class_='item-bottom')
#     for x in brief:
#         briefs = x.string
#         print({'title':name,'brief':briefs,'href':'https://www.extfans.com' + href})


# 彼岸网 (pic.netbian.com wallpaper image downloader — active script below)
import os
import time

import requests
from bs4 import BeautifulSoup
headers = {
    # Pretend to be a regular desktop browser so the site serves normal pages.
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

BASE_URL = 'https://pic.netbian.com'

# Create the output directory up front; the original crashed with
# FileNotFoundError on a fresh checkout because it was never created.
os.makedirs('teacherImage', exist_ok=True)

# Listing pages 2..19 (the site names paginated pages index_N.html).
for page in range(2, 20):
    time.sleep(3)  # throttle: be polite to the server between page fetches
    page_url = BASE_URL + '/index_' + str(page) + '.html'
    # timeout so a stalled connection cannot hang the script forever;
    # raise_for_status so an HTTP error page is not silently processed.
    resp = requests.get(page_url, headers=headers, timeout=10)
    resp.raise_for_status()
    # NOTE(review): the site appears to serve GBK-encoded pages; UTF-8 only
    # affects the human-readable text — the <img src> paths we extract are
    # plain ASCII, so parsing still works. Confirm if page text is needed.
    resp.encoding = "UTF-8"
    soup = BeautifulSoup(resp.text, 'html.parser')

    for img in soup.find_all('img'):
        src = img.get('src')
        if src is None:
            continue
        image_url = BASE_URL + src
        filename = image_url.split('/')[-1]
        # Distinct name for the image response — the original reused `url`
        # for both the URL string and the Response object, which was confusing.
        image_resp = requests.get(image_url, headers=headers, timeout=10)
        image_resp.raise_for_status()
        with open('teacherImage/' + filename, 'wb') as f:
            f.write(image_resp.content)
        print(image_url)







