import urllib.request as ur
import  re
import requests
from lxml import html
import user_agent
import json
import tool
import pymongo
# lxml exposes its etree module through lxml.html; alias used by parse2() below.
etree =html.etree

# Fetch a rented proxy "host:port" from the ipjldl API.
# NOTE(review): this performs network I/O at import time and assumes the API
# responds with a usable proxy address encoded as GBK — confirm failure modes
# (empty/err body) are acceptable, since there is no error handling here.
proxy_address= ur.urlopen('http://a.ipjldl.com/getapi?packid=1&unkey=&tid=&qty=2&time=5&port=1&format=txt&ss=2&css=&pro=&city=&dt=1&usertype=17').read().decode('gbk')

# Route plain-http requests through the rented proxy (https is NOT proxied here).
proxy_handler=ur.ProxyHandler(
    {
        'http':proxy_address
    }
)

# Shared opener used by parse1() and parse2() for every outbound request.
proxy_opener=ur.build_opener(proxy_handler)

def parse1(keyword,page):
    """Fetch one 51job search-result page and return its embedded result list.

    Args:
        keyword: search keyword interpolated into the listing URL.
        page: 1-based page number of the search results.

    Returns:
        The list under ``engine_search_result`` in the page's
        ``window.__SEARCH_RESULT__`` JSON blob, or ``[]`` when the blob
        cannot be located (e.g. a blocked or unexpectedly formatted page).
    """
    # The f-string interpolates keyword/page directly; the original code also
    # chained a no-op .format(keyword=..., page=...) on the already-formatted
    # string — removed.
    url = (
        'https://search.51job.com/list/010000,000000,0000,00,9,99,'
        f'{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99'
        '&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0'
        '&dibiaoid=0&line=&welfare='
    )
    req = ur.Request(
        url=url,
        headers={
            'User-Agent': user_agent.get_user_agent_pc()
        }
    )

    # The site serves GBK; ignore undecodable bytes instead of crashing.
    content = proxy_opener.open(req).read().decode('gbk', 'ignore')
    # Raw string, escaped dots, and re.S so the JSON payload may span lines.
    ret = re.findall(r'window\.__SEARCH_RESULT__ =(.*?)</script>', content, re.S)
    if not ret:
        # Blocked or restructured page: no results rather than an IndexError.
        return []
    data = json.loads(ret[0])
    results = data['engine_search_result']
    return results

def parse2(url):
    """Fetch a 51job detail page and extract the job-detail and company blocks.

    Args:
        url: absolute URL of a single job posting.

    Returns:
        dict with keys 'parse2_job_detail' and 'parse2_job_company'; each value
        is the joined text of the corresponding page section (or the helper's
        default of None when the section is absent).
    """
    request = ur.Request(
        url=url,
        headers={
            'User-Agent': user_agent.get_user_agent_pc()
        }
    )
    raw_bytes = proxy_opener.open(request).read()
    tree = etree.HTML(raw_bytes)
    # First bordered box under the main column holds the job description.
    detail_text = tool.xpath_union(
        contentx=tree,
        path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()',
        split='', default=None)
    # Second bordered box holds the company introduction.
    company_text = tool.xpath_union(
        contentx=tree,
        path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][2]//text()',
        split='', default=None)
    return {
        'parse2_job_detail': detail_text,
        'parse2_job_company': company_text,
    }

def spider(keyword,c,start_page=1,end_page=100):
    """Crawl listing pages start_page..end_page (inclusive) for keyword.

    Each listing row is enriched with the detail-page fields from parse2()
    and inserted into MongoDB collection c as a separate document.
    """
    page = start_page
    while page <= end_page:
        for row in parse1(keyword=keyword, page=page):
            # Follow the posting link and merge the detail fields in place.
            detail = parse2(url=row['job_href'])
            row['parse2_job_detail'] = detail['parse2_job_detail']
            row['parse2_job_company'] = detail['parse2_job_company']
            c.insert_one(row)
        page += 1


if __name__ == '__main__':
    # Connect to a local MongoDB on the default host/port and scrape the
    # first 10 result pages for 'python' into db001.51jobp.
    client = pymongo.MongoClient()
    db = client.get_database('db001')
    # Fixed: original had redundant doubled parentheses get_collection(('51jobp')).
    c = db.get_collection('51jobp')
    spider(keyword='python', c=c, start_page=1, end_page=10)
