import requests
import urllib.parse as up
import re
import json
import kuser_agent
import ktool
import kuser_agent as kua
import lxml.etree as le
import time
import datetime

'''
A pure-requests crawler, suited for one-off runs where the whole crawl
takes on the order of half an hour to an hour.
Production crawler architectures generally do not rely on the bare
requests module alone.
'''

'''First sketch the overall shape of the program — a rough outline.'''

keyword = '数据分析'
page_num = 5
# 1级页面地址，可以填充keyword和page
URL = 'https://search.51job.com/list/000000,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='


def spider(keyword,page_num,db_name,c_name):
    keyword_urlencode = up.urlencode(
        {'k':keyword}
    )[2:]
    for page in range(1,page_num+1):
        url = URL.format(keyword=keyword_urlencode,page=page)
        print(url)
        text = requests.get(
            url = url,
            headers = {
                'User-Agent':kua.get()
            }
        ).text
        json_str = re.findall('window.__SEARCH_RESULT__ = (.*?)</script>', text)[0]
        data = json.loads(json_str)
        search_results = data['engine_search_result']
        for search_result in search_results:
            # print(search_result)
            job_href = search_result['job_href']
            print(job_href)
            content = requests.get(url=job_href,headers={'User-Agent':kua.get()}).content
            texts = le.HTML(content).xpath('//div[@class="bmsg job_msg inbox"]/text()')
            job_information = '\n'.join(texts).strip()

            createDatetime = datetime.datetime.now().strftime('%Y/%m/%d')
            search_result['job_information'] = job_information
            search_result['createDatetime'] =createDatetime
            print(search_result)


if __name__ == '__main__':
    spider(keyword = keyword,page_num=page_num,db_name='',c_name='')

