#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
@author: Reido
@time: 2021/12/5 4:48 PM
"""
import os

import requests
from lxml import etree


class PicDown:
    """Generic XPath-driven picture downloader.

    Crawls an index page, follows each entry to its detail page, extracts
    the image URL there, and saves the image to a local directory.

    Parameters:
        headers: browser request headers, containing user-agent and cookie.
        pics_dic: directory the downloaded pictures are written to.
        domain_name: site domain, e.g. "www.baidu.com"; prefixed onto the
            relative detail-page links found on the index page.
        parse_root_node_xpath: XPath selecting the parent node of each entry
            on the index page, e.g. '//div[@class="divName"]'.
        parse_src_xpath: XPath locating the image URL on a detail page,
            e.g. './a/@href' or './a/@src'.
        parse_detail_xpath: XPath, relative to an index-page entry node,
            extracting the detail-page URL, e.g. './a/@href'.
        main_url: first index page to crawl.
        next_url: second index page to crawl (default: None).
    """

    def __init__(self, headers, pics_dic, domain_name,
                 parse_root_node_xpath, parse_src_xpath,
                 parse_detail_xpath, main_url, next_url=None):
        self.headers = headers
        self.pics_dic = pics_dic
        self.domain_name = domain_name
        self.parse_root_node_xpath = parse_root_node_xpath
        self.parse_src_xpath = parse_src_xpath
        self.parse_detail_xpath = parse_detail_xpath
        self.main_url = main_url
        self.next_url = next_url

    def download(self, src):
        """Fetch the image at *src* and save it as <pics_dic>/<name>.png.

        The file name is the last URL path segment with its extension
        stripped; the saved file always gets a .png suffix, as before.
        """
        filename = src.rsplit("/", 1)[-1].split(".")[0]
        # BUG FIX: the second positional parameter of requests.get is
        # `params`, not `headers` -- the original call never sent the
        # custom user-agent/cookie. Pass headers by keyword.
        resp = requests.get(src, headers=self.headers)
        # Fail loudly on HTTP errors instead of writing an error page
        # into a .png file.
        resp.raise_for_status()
        with open(os.path.join(self.pics_dic, filename + ".png"), 'wb') as fp:
            fp.write(resp.content)
        print(filename + "下载完成！")

    def parse_detail(self, url):
        """Parse a detail page and download the first image it references."""
        tree = self.parse_tree(url=url)
        # [0]: assumes parse_src_xpath matches at least once on the page;
        # raises IndexError otherwise (same behavior as before).
        src = tree.xpath(self.parse_src_xpath)[0]
        self.download(src)

    def parse(self, url):
        """Parse one index page and download every entry found on it."""
        tree = self.parse_tree(url=url)
        for node in tree.xpath(self.parse_root_node_xpath):
            # Index-page links are relative; prefix the site domain.
            detail_url = self.domain_name + node.xpath(self.parse_detail_xpath)[0]
            self.parse_detail(url=detail_url)

    def parse_tree(self, url):
        """GET *url* and return its HTML parsed into an lxml element tree."""
        response = requests.get(url=url, headers=self.headers)
        # Use the detected encoding so non-ASCII pages decode correctly.
        response.encoding = response.apparent_encoding
        return etree.HTML(response.text)


if __name__ == '__main__':
    # Browser identity + session cookie; sent with every request.
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36',
        'cookie': 'wa_session=scgqht67o3f381ml4au33utma8bh6rcfv3q1cqqh38a5sd0isslig26fqctdn9r3as59j2v5tn82t7aha9pakhtsds2j48ralamapf1; bfp_sn_rf_8b2087b102c9e3e5ffed1c1478ed8b78=https://www.bing.com/; bafp=bafc0c20-55d8-11ec-8b0c-cfabc670019c; bfp_sn_rt_8b2087b102c9e3e5ffed1c1478ed8b78=1638715330204; cookieconsent_status=allow'
    }
    pics_dic = "从零开始的异世界生活+壁纸"
    domain_name = "https://wall.alphacoders.com/"
    parse_root_node_xpath = '//*[@class="thumb-container-big "]'
    parse_src_xpath = '//*[@id="page_container"]/div[4]/a/@href'
    parse_detail_xpath = "./div/div[1]/a/@href"
    # 241006 is the alphacoders sub-category id for this wallpaper set;
    # .format stringifies it, so the redundant str() call was dropped.
    main_url = "https://wall.alphacoders.com/by_sub_category.php?id={}&name={}&lang=Chinese".format(241006,
                                                                                                    pics_dic)
    # exist_ok avoids the check-then-create race of exists() + mkdir().
    os.makedirs(pics_dic, exist_ok=True)
    down = PicDown(headers=headers, pics_dic=pics_dic,
                   domain_name=domain_name, main_url=main_url,
                   parse_root_node_xpath=parse_root_node_xpath,
                   parse_src_xpath=parse_src_xpath,
                   parse_detail_xpath=parse_detail_xpath)

    try:
        down.parse(down.main_url)
    except KeyboardInterrupt:
        pass  # allow a clean Ctrl-C abort mid-crawl
