import requests
from lxml import etree
import time


def getURL(page):
    """Return the Liepin job-search URL for result page *page*.

    The query string is a captured search session; only the trailing
    ``curPage`` value varies between requests.
    """
    template = 'https://www.liepin.com/zhaopin/?compkind=&dqs=&pubTime=&pageSize=40&salary=&compTag=&sortFlag=&degradeFlag=0&compIds=&subIndustry=&jobKind=&industries=&compscale=&key=&siTag=1B2M2Y8AsgTpgAmY7PhCfg~fA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_fp_nvbar&d_ckId=042181a172846acc2a069e0b4d208340&d_curPage=1&d_pageSize=40&d_headId=042181a172846acc2a069e0b4d208340&curPage={}'
    return template.format(page)


# html = r.content.decode("UTF-8")

# nodes = etree.HTML(html)

# infos = nodes.xpath("//div[@class='sojob-item-main clearfix']")


# Scrape arbitrary job-posting data across 10 pages. Each record must include:
# job title, salary, years of experience, education level, company name,
# company details, and welfare/benefits info. Submit a run screenshot and code.
def nodeToDict(info):
    """Convert one job-listing node (a ``sojob-item-main`` div) into a dict.

    Parameters
    ----------
    info : lxml element wrapping a single job posting.

    Returns
    -------
    dict with keys ``title``, ``salary``, ``experience``, ``degree``,
    ``company``, ``details``, and ``welfare`` (list of str).

    Raises IndexError if an expected sub-element is missing — i.e. the
    site's page layout has changed.
    """
    # Hoist the shared query: the original evaluated this xpath twice.
    conditions = info.xpath(".//p[@class='condition clearfix']/span")
    return {
        # [2:] strips a 2-character prefix from the title attribute
        # (presumably a leading marker in the markup — TODO confirm).
        'title': info.xpath(".//div[@class='job-info']/h3")[0].get("title")[2:],
        'salary': info.xpath(".//span[@class='text-warning']")[0].text,
        'experience': conditions[2].text,
        'degree': conditions[1].text,
        'company': info.xpath(".//p[@class='company-name']/a")[0].text,
        'details': info.xpath(".//p[@class='field-financing']/span")[0].text.strip(),
        # Comprehension instead of map+lambda (idiomatic, identical result).
        'welfare': [span.text for span in
                    info.xpath(".//p[@class='temptation clearfix']/span")],
    }


# resultset = map(nodeToDict, infos)
# resultset = list(titles)


def getPage1To10():
    """Scrape the first 10 result pages and return a list of job dicts.

    Fetches pages 0-9 (the site's ``curPage`` is 0-based), parses every
    listing on each page with ``nodeToDict``, and sleeps 30 s between
    requests to throttle traffic.

    Returns a flat list of dicts, one per job listing.
    """
    # Fixed typo: original had a redundant double assignment
    # `headers = headers = {...}`.
    headers = {
        'user-agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:80.0) Gecko/20100101 Firefox/80.0'
    }
    resultset = []
    for page in range(10):
        res = requests.get(getURL(page), headers=headers)
        tree = etree.HTML(res.content.decode("UTF-8"))
        listings = tree.xpath("//div[@class='sojob-item-main clearfix']")
        # Comprehension instead of map+list (idiomatic, identical result).
        dics = [nodeToDict(node) for node in listings]
        resultset.extend(dics)
        print("Page {} Completed!".format(page))
        print(dics)
        time.sleep(30)  # polite delay to avoid being rate-limited / blocked

    return resultset


if __name__ == "__main__":
    resultset = getPage1To10()
    with open("./result.txt", mode='a', encoding='utf-8') as f:

        f.write(str(resultset))
