#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @FileName  :resume_download.py
# @Time      :2024/2/29 18:39
# @Author    :Sherlock
import time
import random
import asyncio

import requests
import aiohttp
from lxml import etree

# Listing page of free resume templates on chinaz.com.
url = "https://sc.chinaz.com/jianli/"
# Desktop Chrome User-Agent so the site serves the regular HTML page.
headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
}


async def download_resume(url):
    """Fetch one resume detail page and download its archive.

    :param url: URL of a single resume's detail page.
    :return: raw bytes of the resume archive file.
    """
    # aiohttp sessions and responses are async context managers; plain
    # `with` raises TypeError because they only define __aenter__/__aexit__.
    async with aiohttp.ClientSession() as sess:
        async with sess.get(url) as resp:
            # Unlike requests' `.text` property, aiohttp's `.text()` is a
            # coroutine and must be awaited.
            html = etree.HTML(await resp.text())
            # The download anchor stores the file URL in @href (an <a> tag
            # has no @src attribute).
            resume_link = html.xpath('//*[@id="down"]/div[2]/ul/li[1]/a/@href')[0]
            async with sess.get(resume_link) as res:
                # Read the full body before the session closes; returning the
                # bare response object would hand back a closed, unreadable
                # connection.
                return await res.read()


def downloader(res):
    """Done-callback for a download task: write the fetched bytes to disk.

    :param res: a finished Future/Task whose result is the archive's bytes
        (as returned by ``download_resume``).
    """
    # .result() re-raises any exception from the task, so failures surface
    # instead of being silently dropped.
    data = res.result()
    # time + random suffix keeps concurrent downloads from clobbering each
    # other's filenames.
    with open(f"{int(time.time()) + random.randint(1000, 9999)}.rar", "wb") as f:
        f.write(data)


def main() -> None:
    """Scrape the resume listing page and download every resume archive.

    The single listing page is fetched synchronously with requests/lxml;
    the per-resume downloads are fanned out as asyncio tasks.
    """
    listing = requests.get(url, headers=headers)
    listing.encoding = "utf-8"
    page = etree.HTML(listing.text)
    cards = page.xpath('//*[@id="container"]/div')
    resume_pages = [card.xpath('./p/a/@href')[0] for card in cards]

    async def fetch_all() -> None:
        # Tasks must be created while the event loop is running; calling
        # ensure_future before any loop exists fails on modern Python.
        tasks = [asyncio.ensure_future(download_resume(link)) for link in resume_pages]
        for task in tasks:
            # Without this callback nothing was ever written to disk:
            # `downloader` was defined but never attached in the original.
            task.add_done_callback(downloader)
        await asyncio.gather(*tasks)

    # asyncio.run replaces the deprecated get_event_loop/run_until_complete
    # pattern and guarantees the loop is closed afterwards.
    asyncio.run(fetch_all())


if __name__ == "__main__":
    main()
