import json

import requests
from lxml.etree import HTML

# Seed URL: 51job search results for the keyword "python" (area 090200, page 1).
# The crawl loop below appends pagination URLs discovered on each page.
urls = [
    "https://search.51job.com/list/090200,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="]

# One shared HTTP session so every page fetch reuses the same connection/cookies.
s = requests.Session()


def get_data_from(url):
    """Fetch one 51job search-result page and yield its contents.

    Yields, in order:
      * one dict per job listing with keys 'job', 'company', 'address',
        'salary' (values come straight from the page; only the job title
        is stripped, matching the original markup's extra whitespace), then
      * one str per pagination link found on the page, so the caller can
        enqueue further pages to crawl.

    Raises requests.HTTPError for non-2xx responses.
    """
    r = s.get(url)
    # 51job serves GBK-encoded pages; requests' charset guess would mangle them.
    r.encoding = "GBK"
    r.raise_for_status()
    tree = HTML(r.text)

    # Parse the job listing rows.
    for job in tree.xpath("//div[@class='el']"):
        try:
            name_cell = job.xpath("./p")[0]
            spans = job.xpath("./span")
            job_info = {
                'job': name_cell.xpath("./span/a")[0].text.strip(),
                'company': spans[0].xpath("./a")[0].text,
                'address': spans[1].text,
                'salary': spans[2].text,
            }
        except (IndexError, AttributeError):
            # Row doesn't match the expected listing layout (e.g. a header
            # or ad row) — skip just that row rather than aborting the page.
            continue
        yield job_info

    # Parse pagination links; guard against pages with no pager at all.
    pagers = tree.xpath("//div[@class='p_in']")
    if pagers:
        for link in pagers[0].xpath("./ul/li/a"):
            yield link.attrib['href']


url_index = 0

# BFS-style crawl: `urls` grows while we iterate, because get_data_from
# yields newly discovered pagination URLs which are appended and visited
# in turn. An index-based while loop is required since the list mutates.
with open("./jobs.txt", "w", encoding="utf-8") as fp:
    while url_index < len(urls):
        url = urls[url_index]

        for data in get_data_from(url):
            if isinstance(data, dict):
                # A parsed job posting: echo it and persist one JSON object
                # per line. ensure_ascii=False keeps the Chinese text
                # human-readable in the UTF-8 output file instead of
                # writing \uXXXX escapes.
                print(data)
                fp.write(json.dumps(data, ensure_ascii=False) + "\n")
            elif isinstance(data, str):
                # A pagination URL: enqueue it only if not seen before,
                # so the crawl terminates.
                if data not in urls:
                    urls.append(data)
        url_index += 1
