import requests
from lxml import etree
from urllib import parse
import asyncio
import copy
import aiohttp
from concurrent.futures import ThreadPoolExecutor

# Shared worker pool: detail pages are fetched on up to 10 background threads.
works = ThreadPoolExecutor(10)

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36",
    # Session cookie captured from a logged-in 51job browser session.
    # FIX: the original single-quoted literal was broken across two physical
    # lines (a SyntaxError); the value is now split with implicit string
    # concatenation, byte-for-byte identical to the intended cookie.
    'Cookie': 'guid=94e8e34cabf57f2f2318fdb2c9a13869; _ujz=MTkwMDEyNDc4MA%3D%3D; ps=needv%3D0; slife=lowbrowser%3Dnot%26%7C%26lastlogindate%3D20210429%26%7C%26securetime%3DVWlUYVI1Uj9VMQ80W2QLZlFnBTM%253D; track=registertype%3D1; 51job=cuid%3D190012478%26%7C%26cusername%3D0siZgP8OPiW69K7brQ%252BxIxZYZg2o4mQk5QuEmDucHQY%253D%26%7C%26cpassword%3D%26%7C%26cname%3D1fKZ26SXRb8tkm58nNWZ9A%253D%253D%26%7C%26cemail%3DE6uv8x11VA684PgbTP5G3xG%252BQcNiGOWbcA5Jppp8wUE%253D%26%7C%26cemailstatus%3D0%26%7C%26cnickname%3D%26%7C%26ccry%3D.0CXHr9ikSGF.%26%7C%26cconfirmkey%3D%25241%2524zm9yk0Y4%25244a7g8GXIRdTl2ZSQMi7TZ.%26%7C%26cautologin%3D0%26%7C%26cenglish%3D0%26%7C%26sex%3D0%26%7C%26cnamekey%3D%25241%2524oGKUuPm7%2524tW9UvIO7xobvq.26XNAdD0%26%7C%26to%3D993a606209af58a67efa1e0f45ba43f36089f7fb%26%7C%26; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60020000%2C030200%2C070200%2C080200%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60020000%2C030200%2C070200%2C080200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA32%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FAjava%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch1%7E%60030200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA32%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FAjava%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch2%7E%60030200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA32%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%BF%AA%B7%A2%B9%A4%B3%CC%CA%A6%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch3%7E%60030200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA32%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB'
              '%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FApython%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21recentSearch4%7E%60030200%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA32%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FA%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21collapse_expansion%7E%601%7C%21',
    'Accept': 'application/json, text/javascript, */*; q=0.01'
}

# Detail pages are plain HTML, so they get an HTML Accept header instead of
# the JSON one used for the search API. A shallow copy suffices: the dict
# holds only immutable strings (deepcopy was overkill).
detail_headers = headers.copy()
detail_headers[
    'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9'

# Search keyword; overwritten in __main__ before any CSV file is written.
user_input = ""


def to_csv(save_str):
    """Append one already-formatted CSV row to ``{user_input}.csv``.

    The row is also echoed to stdout as simple progress output. The file is
    opened in append mode on every call so concurrent worker threads each
    perform a single short-lived write.
    """
    target = f"{user_input}.csv"
    with open(target, "a+", encoding='utf-8') as csv_file:
        csv_file.write(save_str)
        print(save_str)


def get_detail(job_href, save_str):
    """Fetch a job detail page, extract description texts, and write the row.

    Parameters:
        job_href: URL of the job detail page.
        save_str: partially built CSV row from the search-results API;
                  the job and company descriptions are appended to it.
    """
    detail = requests.get(job_href, headers=detail_headers)
    detail.encoding = 'gbk'  # 51job detail pages are GBK-encoded
    soup = etree.HTML(detail.text)
    # FIX: the original used r"\xa", which removes the literal three
    # characters backslash-x-a; the intent was to strip non-breaking
    # spaces (U+00A0), common in these GBK pages.
    job_info = "".join(
        soup.xpath('//div[@class="bmsg job_msg inbox"]/p/text()')
    ).replace("\xa0", "").replace("\n", "").strip()  # job description
    company_info = "".join(
        soup.xpath('//div[@class="tmsg inbox"]//text()')
    ).replace("\xa0", "").replace("\n", "").strip()  # company description
    save_str = save_str + "," + job_info + "," + company_info + "\n"
    to_csv(save_str)


def callback(task):
    """Done-callback for a finished page task.

    Unpacks the (csv_row, detail_url) pairs produced by ``get_data`` and
    hands each one to the thread pool for detail-page scraping.
    """
    for save_str, job_href in task.result():
        works.submit(get_detail, job_href, save_str)


async def get_data(url):
    """Fetch one search-result page and return its rows.

    Returns a list of ``(csv_row, detail_url)`` tuples, one per job posting
    on the page. The API answers JSON but labels it ``text/html`` in GBK,
    hence the explicit content_type/encoding overrides.
    """
    rows = []
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url=url) as response:
            payload = await response.json(content_type='text/html', encoding='gbk')
            for job in payload['engine_search_result']:
                fields = [
                    job['company_name'],
                    job['companytype_text'] + " | " + job['companysize_text'],  # blurb under the company name
                    job['companyind_text'],  # industry category
                    job['job_name'],
                    job['jobid'],
                    job['jobwelf'],  # benefits
                    job['job_href'],
                    job['providesalary_text'],  # salary
                    " | ".join(job["attribute_text"]),  # short intro next to the salary
                ]
                rows.append((",".join(fields), job['job_href']))
    return rows


def get_page_num(url):
    """Return the total page count reported by the search API for *url*."""
    response = requests.get(url, headers=headers)
    payload = response.json()
    return payload['total_page']


if __name__ == '__main__':
    user_input = "python"
    job_name = parse.quote(user_input)

    # Write the CSV header once per run. NOTE(review): the file is opened in
    # append mode, so repeated runs keep accumulating rows under a new header.
    with open(f"{user_input}.csv", "a+", encoding='utf-8') as fp:
        fp.write("公司名字,公司描述,所属类别,职位名称,职位id,职位福利,职位详情链接,薪水,简介,职位信息,公司信息" + "\n")

    url = "https://search.51job.com/list/010000%252c020000%252c030200%252c040000%252c180200,000000,0000,00,9,99,{job_name},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare="
    total_page_num = get_page_num(url.format(job_name=job_name, page=1))

    # FIX: create the loop explicitly before scheduling coroutines.
    # asyncio.ensure_future() with no running loop relied on the implicit
    # get_event_loop() behaviour, which is deprecated since 3.10 and removed
    # in 3.12; loop.create_task() on an explicit loop is the supported form.
    loop = asyncio.new_event_loop()
    try:
        tasks = []
        for page in range(1, int(total_page_num) + 1):
            task = loop.create_task(get_data(url.format(job_name=job_name, page=page)))
            task.add_done_callback(callback)
            tasks.append(task)
        loop.run_until_complete(asyncio.wait(tasks))
    finally:
        loop.close()
        # Block until every queued detail-page fetch has been written out.
        works.shutdown(wait=True)
