import urllib.request as ur
import user_agent
import re
import json
import lxml.etree as le
import tool
import pymongo




def pares1(keyword, page):
    """Fetch one listing page of 51job search results and return the job list.

    Args:
        keyword: search keyword interpolated into the listing URL.
        page: 1-based page number of the result listing.

    Returns:
        The list stored under ``engine_search_result`` in the page's embedded
        ``window.__SEARCH_RESULT__`` JSON blob.

    Raises:
        ValueError: if the embedded JSON blob cannot be located (request was
            blocked, or the page layout changed).
    """
    # Build the listing URL for this keyword/page pair.
    url = 'https://search.51job.com/list/130200,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(
        keyword=keyword,
        page=page,
    )
    req = ur.Request(
        url=url,
        # Randomize a desktop User-Agent per request to look less like a bot.
        headers={
            'User-Agent': user_agent.get_user_agent_pc()
        }
    )
    # 51job serves GBK-encoded pages; ignore undecodable bytes rather than crash.
    content = ur.urlopen(req).read().decode('gbk', 'ignore')
    # The search results are embedded as a JS assignment. Use a raw pattern
    # with escaped dots (the original pattern's bare '.' matched any char) and
    # re.S so the JSON still matches if it ever spans multiple lines.
    match = re.search(r'window\.__SEARCH_RESULT__ = (.*?)</script>', content, re.S)
    if match is None:
        # Previously this was an opaque IndexError from findall(...)[0].
        raise ValueError('search-result JSON not found in page (blocked or layout changed?)')
    # Parse the JSON blob into a dict and pull out the result list.
    data = json.loads(match.group(1))
    return data['engine_search_result']



def parse2(url):
    """Download one job-detail page and extract its three text sections.

    Args:
        url: absolute URL of a 51job job-detail page.

    Returns:
        Dict with keys ``parse2_job_detail``, ``parse2_job_conn`` and
        ``parse2_job_company`` holding the joined text of the corresponding
        boxes (or the helper's default when a box is missing).
    """
    request = ur.Request(
        url=url,
        # Rotate a desktop User-Agent for each detail request.
        headers={
            'User-Agent': user_agent.get_user_agent_pc()
        }
    )
    # Fetch raw bytes and let lxml handle the document encoding.
    page_bytes = ur.urlopen(request).read()
    document = le.HTML(page_bytes)
    # The three bordered boxes in the main column hold, in order:
    # job detail, contact info, company info.
    box_xpath = '//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][{n}]//text()'
    field_names = ('parse2_job_detail', 'parse2_job_conn', 'parse2_job_company')
    return {
        name: tool.xpath_union(
            contentx=document,
            path=box_xpath.format(n=position),
            split='',
            default=None,
        )
        for position, name in enumerate(field_names, start=1)
    }


def spider(keyword, c, start_page=1, end_page=100):
    """Crawl listing pages ``start_page``..``end_page`` for ``keyword``,
    enrich each job record with its detail page, and store it.

    Args:
        keyword: search keyword passed through to ``pares1``.
        c: MongoDB collection; every enriched record is ``insert_one``'d.
        start_page: first listing page to crawl (inclusive).
        end_page: last listing page to crawl (inclusive).
    """
    for current_page in range(start_page, end_page + 1):
        # One listing page yields a list of job dicts.
        for job in pares1(keyword=keyword, page=current_page):
            # Follow the job link and merge the detail fields into the record.
            detail = parse2(url=job['job_href'])
            job.update(
                parse2_job_detail=detail['parse2_job_detail'],
                parse2_job_conn=detail['parse2_job_conn'],
                parse2_job_company=detail['parse2_job_company'],
            )
            # Persist the combined record.
            c.insert_one(job)


if __name__ == '__main__':
    # Connect to the local MongoDB instance and target db001 / 51job.
    mongo = pymongo.MongoClient()
    collection = mongo.get_database('db001').get_collection('51job')
    # Crawl the first ten listing pages for the 'python' keyword.
    spider(keyword='python', c=collection, start_page=1, end_page=10)
