import urllib.request as urq
import re
import json
import lxml.etree as le
import tool
import pandas as pd
import pymongo

def parse1(keyword, page):
    """Fetch one page of 51job search results (level-1 listing).

    Parameters
    ----------
    keyword : str
        Search keyword interpolated into the listing URL.
    page : int
        1-based result-page number.

    Returns
    -------
    list[dict]
        The ``engine_search_result`` entries from the JSON blob the page
        embeds in ``window.__SEARCH_RESULT__``.

    Raises
    ------
    ValueError
        If the expected JSON blob cannot be found in the response
        (e.g. layout change or anti-bot block page).
    """
    url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(
        keyword=keyword,
        page=page
    )
    # Send the request with a browser User-Agent so the site serves the
    # normal HTML instead of rejecting the scraper.
    req = urq.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36',
        }
    )
    # The page declares GBK encoding; ignore undecodable bytes.
    with urq.urlopen(req) as resp:
        content = resp.read().decode('gbk', 'ignore')
    # Results are embedded as a JSON object assigned to
    # window.__SEARCH_RESULT__ inside a <script> tag.  Raw string with
    # escaped dots so the pattern matches only the literal marker.
    matches = re.findall(r'window\.__SEARCH_RESULT__ = (.*?)</script>', content)
    if not matches:
        raise ValueError(
            'window.__SEARCH_RESULT__ JSON not found for keyword=%r page=%r'
            % (keyword, page)
        )
    data = json.loads(matches[0])
    # Return only the per-job entries.
    return data['engine_search_result']

def parse2(url):
    """Fetch a job-detail page (level-2) and extract its info sections.

    Parameters
    ----------
    url : str
        Absolute URL of a job-detail page (``job_href`` from :func:`parse1`).

    Returns
    -------
    dict
        Keys ``parse2_job_info``, ``parse2_conn_info`` and
        ``parse2_comp_info`` mapping to the joined text of the first three
        ``tBorderTop_box`` sections (or the helper's default when a
        section is missing).
    """
    req = urq.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36',
        }
    )
    # Fetch and parse the HTML; close the response promptly.
    with urq.urlopen(req) as resp:
        contentx = le.HTML(resp.read())
    # The detail page lays out job / contact / company info as the first
    # three tBorderTop_box divs under tCompany_main — extract them with
    # one parameterized XPath instead of three copy-pasted calls.
    path_tpl = '//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][%d]//text()'
    keys = ('parse2_job_info', 'parse2_conn_info', 'parse2_comp_info')
    return {
        key: tool.xpath_union(
            contentx=contentx, path=path_tpl % idx, split='', default=None)
        for idx, key in enumerate(keys, start=1)
    }


def spider(keyword, c, start_page=1, end_page=100):
    """Crawl 51job listing pages and store enriched records in MongoDB.

    For every listing entry on pages ``start_page``..``end_page``
    (inclusive), fetch the job's detail page, merge the extracted detail
    sections into the listing record, and insert the record into
    collection ``c``.

    Parameters
    ----------
    keyword : str
        Search keyword passed to :func:`parse1`.
    c :
        MongoDB collection object exposing ``insert_one``.
    start_page, end_page : int
        Inclusive page range to crawl.
    """
    for page in range(start_page, end_page + 1):
        for record in parse1(keyword=keyword, page=page):
            # parse2 returns exactly the parse2_* keys, so a plain
            # dict.update merges the detail sections into the record.
            record.update(parse2(url=record['job_href']))
            c.insert_one(record)


if __name__ == '__main__':
    # Connect to the local MongoDB instance and crawl the first ten
    # result pages for the keyword "python" into db001.51job.
    mongo_client = pymongo.MongoClient()
    collection = mongo_client['db001']['51job']
    spider(keyword='python', c=collection, start_page=1, end_page=10)
