import requests
from bs4 import BeautifulSoup
'''
Author: Lu Yongjun

Crawler core module built on the requests HTTP library and the
BeautifulSoup (bs4) parsing library.

It defines a single class, Spider, whose ``datareturn`` method sends a
GET request to the given URL (default timeout: 5 seconds), filters the
response, and returns the extracted job data.
'''
class Spider:
    """Scraper for Liepin job-listing search-result pages.

    ``datareturn`` downloads a search-result page and extracts five
    parallel lists (salary lower bound, location, job title, detail
    link, company name) for every listing whose salary is a concrete
    number; listings marked "面议" (negotiable) are skipped entirely.
    """

    # Browser-like User-Agent so the site serves the normal page.
    USER_AGENT = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
    )

    def datareturn(self, url, timeout=5):
        """Fetch *url* and return the filtered job data.

        Parameters
        ----------
        url : str
            Address of the Liepin search-result page.
        timeout : int or float, optional
            Request timeout in seconds (default 5).

        Returns
        -------
        tuple[list, list, list, list, list]
            ``(money, add, title, href, com)`` — index-aligned lists of
            salary lower bound (int), location, job title, detail-page
            link and company name.

        Raises
        ------
        requests.RequestException
            On network failure or timeout.
        """
        resp = requests.get(
            url,
            timeout=timeout,
            headers={'User-Agent': self.USER_AGENT},
        )
        return self._parse_page(resp.text)

    def _parse_page(self, html):
        """Extract ``(money, add, title, href, com)`` lists from page HTML.

        Listings whose salary reads "面议" (negotiable) or cannot be
        parsed as a number are skipped *before* anything is appended, so
        the five returned lists always stay the same length.  (The
        original appended the title first and only then skipped, which
        mis-aligned the lists whenever a negotiable salary appeared.)
        """
        money, add, title, href, com = [], [], [], [], []
        soup = BeautifulSoup(html, 'lxml')
        for item in soup.find_all('div', class_='sojob-item-main clearfix'):
            # Decide first whether this listing is kept at all.
            pay_text = str(item.find('span', class_='text-warning').text)
            if pay_text == "面议":
                continue
            try:
                # e.g. "15-30k·16薪" -> lower bound 15
                low = int(pay_text.split("-", 1)[0])
            except ValueError:
                continue  # unexpected salary format — skip the listing

            money.append(low)
            title.append(str(item.find('h3')['title']))
            href.append(item.find('a')['href'])
            # The location is rendered either as a link or a plain span.
            area = item.find('a', class_='area') or item.find('span', class_='area')
            add.append(str(area.text))
            com.append(str(item.find('p', class_='company-name').find('a').text))

        return money, add, title, href, com


if __name__ == "__main__":
    sp = Spider()
    # Liepin search URL for "java" jobs (page 50 of the result set).
    url = "https://www.liepin.com/zhaopin/?init=-1&headckid=be6476591703a61f&fromSearchBtn=2&ckid=be6476591703a61f&degradeFlag=0&key=java&siTag=k_cloHQj_hyIn0SLM9IfRg~fA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_unknown&d_ckId=962903d98747d74adc3cbcc4667927fd&d_curPage=0&d_pageSize=40&d_headId=962903d98747d74adc3cbcc4667927fd&curPage=50"
    t = sp.datareturn(url)
    print(t)
    # datareturn yields (money, add, title, href, com) — name the
    # unpacked values accordingly (the original bound `region` to the
    # address list and `address` to the link list, which was misleading).
    for pay, region, job_name, link, company in zip(t[0], t[1], t[2], t[3], t[4]):
        print(pay)