'''实战案例1_爬取彼岸桌面壁纸
网址： http://www.netbian.com/shouji/meinv/index.htm

爬取分页规则：
    http://www.netbian.com/shouji/meinv/index.htm
    http://www.netbian.com/shouji/meinv/index_2.htm
    http://www.netbian.com/shouji/meinv/index_3.htm   '''

import urllib.request
from bs4 import BeautifulSoup
import os


# Basic example: fetch one listing page and print (image name, thumbnail url) pairs.
# NOTE: netbian.com serves pages encoded as GBK (<meta charset="gbk" />),
# so the response body must be decoded with 'gbk', not 'utf-8'.
url = 'http://www.netbian.com/shouji/meinv/index_2.htm'

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
    'Cookie': '__yjs_duid=1_3be0441d68979f0ad37659a8318731ed1683266960587; Hm_lvt_0f461eb489c245a31c209d36e41fcc0f=1683266961; trenvecookieinforecord=%2C26-31025%2C26-30337%2C; trenvecookieclassrecord=%2C26%2C; Hm_lpvt_0f461eb489c245a31c209d36e41fcc0f=1683267598'
}

request = urllib.request.Request(url=url, headers=headers)
# Use a context manager so the HTTP response is always closed,
# even if read()/decode() raises.
with urllib.request.urlopen(request) as response:
    content = response.read().decode('gbk')

# Parse the page with BeautifulSoup (lxml backend).
soup = BeautifulSoup(content, 'lxml')

# Thumbnails (<img>) and their captions (<b>) appear in parallel order
# under the ".list" container, so the two selections can be zipped.
img_url_list = soup.select('.list > ul > li > a > img')
img_name_list = soup.select('.list > ul > li > a > b')
for img_tag, name_tag in zip(img_url_list, img_name_list):
    # Thumbnail URL comes from the <img> 'src' attribute.
    img_url = img_tag.attrs.get('src')
    # Caption text, with spaces stripped so it is usable as a file name.
    img_name = name_tag.get_text().replace(' ', '')
    print(img_name, img_url)





# -------------- Disabled: multi-page wallpaper downloader -------------------------------------
# NOTE(review): The entire block below is commented-out code, kept inside a
# module-level triple-quoted string literal (it is evaluated and discarded at
# import time; none of it runs). It contains the full working version of this
# script: build_request(page) constructs the paged URL and fetches the GBK
# page, parse(content) extracts parallel name/url lists via BeautifulSoup,
# down_img(...) saves each image under a per-page folder on D:\, and the
# __main__ driver prompts for a start/end page range via input().
# HACK: keeping dead code in a string is a smell — presumably it is retained
# as tutorial reference material; consider moving it to version control
# history or a separate file instead. Left byte-identical here.
'''# 构造请求体，并返回响应体
def build_request(page):
    # 注意：彼岸桌面 文件格式gbk  <meta charset="gbk" />
    if page == 1:
        url = 'http://www.netbian.com/shouji/meinv/index.htm'
    else:
        url = 'http://www.netbian.com/shouji/meinv/index_{0}.htm'.format(str(page))

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36',
        'Cookie': '__yjs_duid=1_3be0441d68979f0ad37659a8318731ed1683266960587; Hm_lvt_0f461eb489c245a31c209d36e41fcc0f=1683266961; trenvecookieinforecord=%2C26-31025%2C26-30337%2C; trenvecookieclassrecord=%2C26%2C; Hm_lpvt_0f461eb489c245a31c209d36e41fcc0f=1683267598'
    }

    request = urllib.request.Request(url=url, headers=headers)
    response = urllib.request.urlopen(request)
    content = response.read().decode('gbk')
    return content

# 使用bs4解析
def parse(content):
    # 使用bs4
    soup = BeautifulSoup(content, 'lxml')
    # print(soup)
    #j 获取指定标签
    img_url_list = soup.select('.list > ul > li > a > img')
    img_name_list = soup.select('.list > ul > li > a > b')
    # print(img_url_list)
    # print(img_name_list)
    imgNameList = []
    imgUrlList = []
    for index in range(len(img_url_list)):
        # 获取属性
        item_attrs = img_url_list[index].attrs
        # 获取图片路径
        img_url = item_attrs.get('src')
        # 获取图片名称
        img_name = img_name_list[index].get_text()
        img_name = img_name.replace(' ', '')
        # print(img_name, img_url)
        imgUrlList.append(img_url)
        imgNameList.append(img_name)
    # 封装返回体 {'imgNameList':[], 'imgUrlList':[]}
    res = {}
    res['imgNameList'] = imgNameList
    res['imgUrlList'] = imgUrlList
    return res


# 下载图片到本地
def down_img(page, img_name_list, img_url_list):
    local_path = 'D:\\hxy\\test\\爬取彼岸桌面壁纸'
    local_path = local_path + '\\'+ str(page)
    existFlag = os.path.exists(local_path)
    if existFlag == False:
        # 文件不存在，创建
        os.mkdir(local_path)

    for index in range(len(img_name_list)):
        img_url = img_url_list[index]
        img_name = img_name_list[index]
        print(img_name, img_url)
        save_path = local_path +'\\'+img_name+'.jpg'
        # 下载网络图片 https://pic.3gbizhi.com/uploads/20220716/f4831b1c91556fc8a04163ccdd0353f0.jpg
        urllib.request.urlretrieve(img_url, save_path)


if __name__ == '__main__':
    start = int(input('请输入起始页：'))
    end = int(input('请输入终止页：'))
    # 当 start 小于 1 默认赋予 1；
    start = 1 if start < 1 else start
    print(start, end)
    for page in range(start, end+1):
        print(page)
        content = build_request(page)
        dict_res = parse(content)
        print(dict_res)
        imgUrlList = dict_res.get('imgUrlList')
        imgNameList = dict_res.get('imgNameList')
        down_img(page, imgNameList, imgUrlList)
'''