'''.复刻课上的案例爬取站长之家这个网站的简历(https://sc.chinaz.com/jianli/free.html)
要求
<1>使用pyquery解析方式(或者可以尝试使用xpath)
<2>爬取前10页的简历即可
<3>交作业的时候提交代码即可'''

import os  # 内置os库

import requests  # 第三方包
from pyquery import PyQuery as pq  # 第三方包

def jianLi(page, save_dir='站长之家'):
    """Download every free resume template on one listing page of
    https://sc.chinaz.com/jianli/free.html into *save_dir*.

    Args:
        page: 1-based listing-page number (page 1 uses a different URL
            pattern from pages 2+).
        save_dir: existing directory to write the downloaded files into.
            Defaults to the folder the __main__ block creates, so existing
            callers (``jianLi(page)``) behave exactly as before.
    """
    # 1. Build the listing-page URL; page 1 has its own pattern.
    if page == 1:
        url = "https://sc.chinaz.com/jianli/free.html"
    else:
        url = f'https://sc.chinaz.com/jianli/free_{page}.html'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36'
    }
    # Fetch the listing page's HTML source (timeout so a dead server can't hang us).
    r = requests.get(url, headers=headers, timeout=10).content.decode()
    # 2. Parse it and walk every child div of the #container element.
    ret = pq(r)
    for div in ret('#container>div'):
        item = pq(div)
        # Detail-page link and resume title for this card.
        href = item.children('a').attr('href')
        name = item.children('p').text()
        if not href or not name:
            # Not every child div is a resume card; skip anything without a link/title.
            continue
        # chinaz often emits protocol-relative links ("//sc.chinaz.com/...");
        # requests needs a scheme, so complete them.
        if href.startswith('//'):
            href = 'https:' + href

        # 3. Fetch the detail page and pull the download link out of it.
        detail_r = requests.get(url=href, headers=headers, timeout=10).content.decode()
        detail_ret = pq(detail_r)
        download_href = detail_ret('#down ul>li>a').attr('href')
        if not download_href:
            # Layout changed or this item has no download section; don't crash the run.
            continue
        # File extension taken from the download URL (e.g. 'rar', 'zip').
        end = download_href.split('.')[-1]

        # 4. Download the archive and save it named after the resume title.
        data = requests.get(download_href, headers=headers, timeout=10).content
        with open(os.path.join(save_dir, f'{name}.{end}'), 'wb') as f:
            f.write(data)
        print(f'{name}保存成功')


if __name__ == '__main__':
    # Directory the downloaded resumes are written into.
    word = '站长之家'
    # makedirs(exist_ok=True) is idempotent: no check-then-act race
    # between os.path.exists and os.mkdir.
    os.makedirs(word, exist_ok=True)
    # The assignment asks for the first 10 listing pages only.
    for page in range(1, 11):
        jianLi(page)
