# import requests
from lxml import etree
# url = "https://www.baidu.com/s?tn=68018901_45_oem_dg&ie=utf-8&wd=%E6%89%8B%E6%9C%BA"
# headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'}
# response = requests.get(url, headers=headers) # 返回一个经过解码的字符串，response.text返回一个文本数据。
# text = response.content.decode('utf-8')
# # text=response.content
# html = etree.HTML(text) # 调用etree模块的HTML类，对response进行初始化，这样成功构造xpath解析对象。
# print('>>>>>>>>',html)
#
# result = html.xpath('//')   # 对html进行解析
# print('>>>>>>>',result)

import os
import time

import requests
from bs4 import BeautifulSoup

def bian(n):
    """Download wallpaper images from pic.netbian.com into ./teacherImage/.

    Crawls listing pages 2 through n (page 1 uses a different URL pattern,
    index.html, and is skipped by the original design), follows each of the
    up-to-20 thumbnails per page to its detail page, and saves the full-size
    image under teacherImage/ using the image's own file name.

    Args:
        n: last listing page number to fetch (inclusive). n < 2 fetches nothing.
    """
    # Ensure the output directory exists; the original crashed with
    # FileNotFoundError when teacherImage/ was missing.
    os.makedirs('teacherImage', exist_ok=True)
    for x in range(2, n + 1):
        url = "https://pic.netbian.com/index_" + str(x) + ".html"
        # Fetch the listing page and force UTF-8 before reading .text.
        r = requests.get(url)
        r.encoding = "UTF-8"
        soup = BeautifulSoup(r.text, 'html.parser')
        for i in range(1, 21):
            time.sleep(0.2)  # throttle: be polite to the server
            address = soup.select('#main > div.slist > ul > li:nth-child(' + str(i) + ') > a')
            if not address:
                # Fewer than 20 thumbnails on this page; original raised IndexError here.
                continue
            # href looks like /tupian/12345.html -> extract "12345".
            add = str(address[0].get('href')).split('/')[2]
            addr = add.split('.')[0]
            urls = 'https://pic.netbian.com/tupian/' + addr + '.html'
            rs = requests.get(urls)
            # BUG FIX: original set r.encoding (the already-consumed listing
            # response) instead of rs.encoding, so the detail page was decoded
            # with requests' guessed charset rather than UTF-8.
            rs.encoding = "UTF-8"
            soups = BeautifulSoup(rs.text, 'html.parser')
            img = soups.select('#img > img')
            if not img:
                continue  # detail page layout changed or image missing
            img_url = 'https://pic.netbian.com' + img[0].get('src')
            suffix = img_url.split('/')[-1]  # image file name, e.g. abc.jpg
            urlq = requests.get(img_url)
            # 'wb': write the raw image bytes without text decoding.
            with open('teacherImage/' + suffix, 'wb') as f:
                f.write(urlq.content)
            print('>>', img_url)


# Entry point guard: only start crawling when run as a script, so importing
# this module (e.g. to reuse bian()) does not trigger input() and a crawl.
if __name__ == "__main__":
    yeshu = int(input('请输入需要爬几页：'))
    bian(yeshu)
