# -*- coding:utf-8 -*-
import random
import re
import time

import requests
from lxml import html
from urllib import parse
import csv
# 地区分布，薪资水平、职位要求   大数据 岗位

# Search keyword. 51job embeds the keyword in the URL path and expects it
# percent-encoded twice, hence the nested quote() calls.
key = parse.quote(parse.quote('大数据'))

# Request headers shared by every HTTP call the crawler makes; the
# User-Agent mimics a desktop Chrome browser.
headers = {
    'Host': 'search.51job.com',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36',
}


def get_links(page):
    """Fetch one search-result page and extract job links with salaries.

    Parameters:
        page: 1-based page number of the 51job search results.

    Returns:
        A list of (detail_url, salary) string tuples, one per job row
        matched by the regex; empty list when nothing matches.
    """
    url = ('https://search.51job.com/list/190200,000000,0000,00,9,99,'
           + key + ',2,' + str(page) + '.html')
    print(url)
    r = requests.get(url, headers=headers, timeout=10)
    # 51job serves GBK-encoded pages; decode accordingly before regex matching.
    r.encoding = 'gbk'
    # Group 1 = detail-page href, group 2 = salary text (t4 span).
    reg = re.compile(
        r'class="t1 ">.*? <a target="_blank" title=".*?" href="(.*?)".*? <span class="t2">.*?<span class="t4">(.*?)</span>',
        re.S)
    return re.findall(reg, r.text)

# 多页处理，下载到文件
# 多页处理，下载到文件
def get_content(link, salary):
    """Fetch one job-detail page and build a CSV row for it.

    Parameters:
        link: absolute URL of the job posting's detail page.
        salary: salary text already scraped from the listing page.

    Returns:
        A list of 11 string fields matching the CSV header written by the
        main script, or None when any expected element is missing (the
        exception is printed, mirroring the original best-effort behavior).
    """
    r1 = requests.get(link, headers=headers, timeout=10)
    # Detail pages are GBK-encoded, like the listing pages.
    r1.encoding = 'gbk'
    t1 = html.fromstring(r1.text)
    try:
        job = t1.xpath('//div[@class="tHeader tHjob"]//h1/text()')[0]
        company = t1.xpath('//p[@class="cname"]/a/text()')[0]
        # Benefit labels; kept as a list and stringified below, as before.
        label = t1.xpath('//div[@class="t1"]/span/text()')
        # The info paragraph's text nodes: [0]=area, [1]=work experience,
        # [2]=education — evaluate the xpath once instead of three times.
        info = t1.xpath('//div[@class="cn"]/p[2]/text()')
        area, workyear, education = info[0], info[1], info[2]
        companytype = t1.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[1]/text()')[0]
        companyScale = t1.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[2]/text()')[0]
        scope = t1.xpath('/html/body/div[3]/div[2]/div[4]/div[1]/div[2]/p[3]/a[1]/text()')[0]
        # Job-requirement text between the "任职要求" marker and the next section.
        require = re.findall(
            re.compile(r'<div class="bmsg job_msg inbox">.*?任职要求(.*?)<div class="mt10">', re.S),
            r1.text)
        return [str(company), str(job), str(education), str(label), str(salary),
                str(companytype), str(companyScale), str(scope), str(workyear),
                str(area), str(require)]
    except Exception as e:
        # Best-effort scraping: skip pages whose layout doesn't match.
        print(e)
        return None


# 获取总页数
def getPageNum(url):
    """Return the total number of result pages for a search URL.

    Parameters:
        url: a 51job search-results URL (any page of the result set).

    Returns:
        The page count as a string of digits (callers convert with int()).

    Raises:
        ValueError: when the page-count marker is absent from the response,
            e.g. because the site layout changed or the request was blocked.
    """
    # Use the module-level headers instead of a duplicated dict, and avoid
    # shadowing the `lxml.html` import with a local named `html`.
    resp = requests.get(url, headers=headers, timeout=10)
    page = resp.content.decode("gbk")
    match = re.search(r'<span class="td">共(\d*)页，到第</span>', page, re.S)
    if match is None:
        # Fail with a clear message rather than an opaque AttributeError.
        raise ValueError("page count marker not found in 51job response")
    return match.group(1)

# Entry URL: page 1 of the double-quoted "大数据" search in region 190200.
url = "https://search.51job.com/list/190200,000000,0000,00,9,99,%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE,2,1.html"
pageNum = getPageNum(url)

# Write the CSV header once. newline='' is required by the csv module to
# avoid blank rows on Windows.
with open("51job.csv", 'w', encoding="utf-8", newline='') as f:
    writer = csv.writer(f)
    title = ["公司","职位","学历","福利","工资","公司类型","公司规模","经营范围","工作经验","地区","任职要求"]
    # 标题
    writer.writerow(title)

# Pages are 1-based and pageNum is inclusive, so the upper bound must be
# pageNum + 1 — the original range(1, pageNum) silently skipped the last page.
for i in range(1, int(pageNum) + 1):
    print('正在爬取第{}页信息'.format(i))
    # time.sleep(random.random()+random.randint(1,5))
    links = get_links(i)
    items = []
    for link, salary in links:
        item = get_content(link, salary)
        if item is not None:
            items.append(item)
    # Append this page's rows to the CSV after the inner loop finishes.
    with open("51job.csv", 'a+', encoding="utf-8", newline='') as f:
        csv.writer(f).writerows(items)
    # time.sleep(random.random() + random.randint(0,1))