# -*- coding: utf-8 -*-
# @Author:Thl
# @Time: 2019/4/18
"""
在url中，list/后面这个代码190200表示城市是长沙
"""
import requests
from lxml import etree
import csv
import time


headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
}


def get_cs_info(keywords, page, city, city_code="190200"):
    """Scrape one 51job result page and append its rows to the keyword CSV.

    Parameters:
        keywords: search keyword, e.g. "python".
        page: 1-based result-page number.
        city: city name, used only in the output file name.
        city_code: 51job numeric area code in the URL
            (default "190200", Changsha — keeps old behavior).

    Side effects: performs one HTTP GET and appends rows to
    "<keywords>_<city>.csv".
    """
    url = (
        f"https://search.51job.com/list/{city_code},000000,0000,00,9,99,"
        f"{keywords},2,{page}.html"
    )
    # timeout so a stalled server cannot hang the scrape indefinitely
    res = requests.get(url, headers=headers, timeout=10)
    res.encoding = "gbk"  # 51job serves GBK-encoded pages
    html = etree.HTML(res.text)

    data = []
    for item in html.xpath('//*[@id="resultList"]/div[@class="el"]'):
        job = item.xpath('./p/span/a/@title')[0]
        company = item.xpath('./span[1]/a/text()')[0]
        area = item.xpath('./span[2]/text()')[0]
        # salary may be absent; fall back to an empty string
        salary_nodes = item.xpath('./span[3]/text()')
        salary = salary_nodes[0] if salary_nodes else ""
        # raw date looks like "MM-DD"; render "M月DD日", dropping a leading zero
        raw_date = item.xpath('./span[4]/text()')[0]
        month = raw_date[:2].lstrip("0")
        date = f"{month}月{raw_date[-2:]}日"
        data.append([job, company, area, salary, date])

    with open(f"{keywords}_{city}.csv", "a+", encoding="utf-8", newline="") as f:
        csv.writer(f, dialect="excel").writerows(data)


def get_cs_infos(keywords):
    """Scrape every 51job result page for *keywords* in the Changsha area.

    Fetches page 1 to discover the total page count and the city name,
    writes the CSV header to "<keywords>_<city>.csv", then delegates each
    page to get_cs_info(), which appends the rows.

    Side effects: network requests plus creation/overwrite of the CSV file.
    """
    url = f"https://search.51job.com/list/190200,000000,0000,00,9,99,{keywords},2,1.html"
    # timeout so a stalled server cannot hang the scrape indefinitely
    res = requests.get(url, headers=headers, timeout=10)
    res.encoding = "gbk"  # 51job serves GBK-encoded pages
    html = etree.HTML(res.text)
    # Pager text nodes; the second one looks like " / 42" (total page count).
    pages = html.xpath('//*[@id="resultList"]/div[2]/div[5]/text()')
    city = html.xpath('//*[@id="work_position_input"]/@value')[0]

    # "w" mode truncates any previous run's file before writing the header.
    with open(f"{keywords}_{city}.csv", "w", encoding="utf-8", newline="") as f:
        csv.writer(f, dialect="excel").writerow(["职位名", "公司名", "工作地点", "薪资", "发布时间"])

    page_num = int(pages[1].replace(" ", "").replace("/", ""))
    for page in range(1, page_num + 1):
        get_cs_info(keywords, page, city)


if __name__ == "__main__":
    # Guard so importing this module does not trigger a network scrape.
    start = time.time()
    get_cs_infos("python")  # scrape listings for the keyword "python"
    print(time.time() - start)  # total elapsed scrape time in seconds

