import csv
import json
import re
import time
import requests


# Timestamp baked into the output name so repeated runs never clobber
# an earlier CSV (format: YYYYmmdd-HHMMSS, local time).
current_time = time.strftime("%Y%m%d-%H%M%S", time.localtime())
file_name = f"51job_python_{current_time}.csv"


def get_response(url):
    """GET *url* impersonating desktop Chrome, with a 10-second timeout.

    Returns the raw ``requests.Response``; no status checking is done here.
    """
    # Host + browser User-Agent so 51job serves the normal search page.
    request_headers = {
        'Host': 'search.51job.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
    }
    return requests.get(url, headers=request_headers, timeout=10)


# Walk every page of the 51job "python" search and append one CSV row per job.
# 'with' guarantees the file is closed (the original leaked the handle);
# newline='' stops the csv module double-spacing on Windows, and utf-8-sig
# makes the Chinese headers open cleanly in Excel.
# NOTE(review): the header labels and the per-row fields do not line up
# one-to-one (e.g. 'jobwelf' lands under '职位详情') — preserved as-is,
# verify against the intended schema.
with open(file_name, 'a', newline='', encoding='utf-8-sig') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerow(['职位名称', '职位详情', '公司名称',
                     '公司类型', '公司主营', '公司详情',
                     '薪资',
                     '工作地点', '更新日期', '发布日期',
                     '招聘人数', '学历要求', '工作经验',
                     ])

    # 51job embeds the search payload as "window.__SEARCH_RESULT__ = {...}"
    # inside a <script> tag. Compile once (it runs on every page); \s*=\s*
    # absorbs the " = " the old code trimmed with [0][2:], and re.S lets the
    # JSON blob span lines.
    result_pattern = re.compile(r"window\.__SEARCH_RESULT__\s*=\s*(.*?)</script>", re.S)

    start_url = 'https://search.51job.com/list/020000,000000,0000,00,9,99,python,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
    response = get_response(start_url)
    script_info = json.loads(result_pattern.search(response.text).group(1))
    total_page = script_info['total_page']
    print(f"共{total_page}页")

    # '1.' occurs exactly once in start_url (the ",2,1.html" page slot), so a
    # single split gives a template we can stitch the page number into.
    # Loop-invariant, so computed once instead of per page.
    url_prefix, url_suffix = start_url.split('1.', 1)
    for page in range(1, int(total_page) + 1):
        page_url = url_prefix + str(page) + "." + url_suffix
        print(f"正在爬取第{page}页数据")

        response = get_response(page_url)
        script_info = json.loads(result_pattern.search(response.text).group(1))
        print(script_info)

        for job in script_info['engine_search_result']:
            try:
                info_row = (job['job_name'], job['jobwelf'], job['company_name'],
                            job['companytype_text'], job['companyind_text'],
                            job['companysize_text'], job['providesalary_text'],
                            job['workarea_text'], job['updatedate'], job['issuedate'],
                            job['attribute_text'][3],
                            job['attribute_text'][2],
                            job['attribute_text'][1])
                print(info_row)
                # One row at a time — no need to buffer a single-element list.
                writer.writerow(info_row)
            except IndexError:
                # Some postings ship fewer attribute_text entries (e.g. no
                # 招聘人数); skip those rows instead of aborting the page.
                print('IndexError')
        print(f"第{page}页爬取完毕")
print(current_time + "的数据全部爬取完毕")

