#!/usr/bin/env python
# -*-coding:UTF-8 -*-
'''
@Project ：爬虫-波波老师
@File：19-xpath站长素材.py
@Author ：文非
@Date：2021/3/1120:16
@Require： # 爬取站长素材中的免费简历模板
'''
import os
from urllib.parse import urlencode, quote, unquote
from urllib.request import urlopen

import requests
from lxml import etree
if __name__ == "__main__":
    # Spoof a desktop-browser User-Agent so the site serves normal pages.
    headers = {
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
    }
    # Search keyword must be percent-encoded before going into the query URL.
    keyword = "免费"
    print(keyword)
    encoded_keyword = quote(keyword)
    print(encoded_keyword)
    # classID=864 is the resume-template category of the chinaz search API.
    url = "https://aspx.sc.chinaz.com/query.aspx?keyword={}&classID=864".format(encoded_keyword)
    # exist_ok avoids the check-then-create race of exists()+mkdir().
    os.makedirs('./简历', exist_ok=True)
    for page in range(1, 10):
        print(page)
        page_url = url + "&page=" + str(page)
        # timeout so a stalled connection cannot hang the scraper forever.
        response = requests.get(url=page_url, headers=headers, timeout=10)
        # Parse the listing page; each <a> under #main links to one
        # template's detail page (href is protocol-relative, e.g. //sc.chinaz.com/...).
        tree = etree.HTML(response.text)
        detail_hrefs = tree.xpath('//div[@id="main"]/div/div/a/@href')
        print(detail_hrefs)
        for href in detail_hrefs:
            detail_url = "https:" + href
            detail_html = requests.get(url=detail_url, headers=headers, timeout=10).text
            # The first mirror link inside the download box is the archive URL.
            d_tree = etree.HTML(detail_html)
            links = d_tree.xpath('//div[@class="down_wrap"]/div[2]/ul/li/a/@href')
            if not links:
                # Page layout changed or the item has no download box; skip
                # instead of crashing the whole run with an IndexError.
                print("no download link found:", detail_url)
                continue
            download_link = links[0]
            print(download_link)
            # Binary content (a .rar/.zip archive); longer timeout for the file body.
            archive = requests.get(download_link, headers=headers, timeout=30).content
            # Save under the last path segment of the download URL as the filename.
            with open("./简历/" + download_link.split('/')[-1], "wb") as fp:
                fp.write(archive)
        print("************** 下载 完成 ********")


