import requests
import bs4


def get_jobs(url):
    """Yield job postings scraped from a 51job.com search-result page.

    Walks the parse tree starting at the listing container, yielding one
    dict per job row with keys ``title``, ``company``, ``address`` and
    ``salary``.  When a "next page" (下一页) link is found, recursively
    yields the jobs from subsequent pages as well.

    :param url: absolute URL of a 51job search-result page.
    :raises requests.HTTPError: if the page request fails.
    :raises requests.Timeout: if the server does not respond in time.
    """
    # Timeout so a stalled server cannot hang the scraper indefinitely.
    r = requests.get(url, timeout=10)
    # The site serves GBK-encoded pages; requests cannot always detect it.
    r.encoding = "GBK"
    r.raise_for_status()
    bs = bs4.BeautifulSoup(r.text, features='lxml')
    item = bs.find("div", attrs={"class": "el title"})
    while item:
        # Advance to the next node in document order.
        item = item.next_element
        if not item:
            break  # reached the end of the document

        # Only Tag nodes carry .attrs; skip strings/comments/doctypes.
        if not isinstance(item, bs4.element.Tag):
            continue
        job_info = {}
        if "class" in item.attrs and "el" in item.attrs['class']:
            # Rows that lack the expected sub-structure (ads, headers)
            # raise AttributeError/IndexError — skip them, but let any
            # unexpected error propagate instead of hiding it.
            try:
                job_info['title'] = item.p.span.a.string.strip()
                items = item.find_all('span', recursive=False)
                job_info['company'] = items[0].a.text
                job_info['address'] = items[1].text
                job_info['salary'] = items[2].text
                # Hand one completed record back to the caller.
                yield job_info
            except (AttributeError, IndexError, KeyError):
                pass

    dw_page = bs.find("div", attrs={"class": "dw_page"})
    if not dw_page:
        return
    pages = dw_page.find_all("li", attrs={"class": "bk"})
    for p in pages:
        try:
            if p.a.text == "下一页":
                next_page = p.a.attrs['href']
                print("Get next page:" + next_page)
                # Flatten the recursive generator into this one.
                yield from get_jobs(next_page)
        except (AttributeError, KeyError):
            # <li> without an <a> or without an href — not a pager link.
            continue

# Entry-point guard: importing this module must not trigger the crawl.
if __name__ == "__main__":
    # 51job search: PHP developer jobs, area 090200, page 1.
    START_URL = "https://search.51job.com/list/090200,000000,0000,40%252C32,9,99,PHP%25E5%25BC%2580%25E5%258F%2591,2,1.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="
    for j in get_jobs(START_URL):
        print(j)