# Prevent SSL certificate verification errors
import ssl
ssl._create_default_https_context = ssl._create_unverified_context

import urllib.request as ur
import re
import json
import lxml.etree as le
import tool
import pymongo


#
# url = 'https://search.51job.com/list/040000,000000,0000,00,9,99,python,2,6.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
# req = ur.Request(
#     url = url,
#     headers= {
#        'User-Agent':'Mozilla/5.0.html (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.html.1271.64 Safari/537.11'
#     }
# )
#
# content = ur.urlopen(req).read().decode('gbk','ignore')
# print (content)
#
# with open ('51job2.html','w',encoding='gbk') as f:
#     f.write(content)
#
# ret = re.findall('window.__SEARCH_RESULT__ =(.*?)</script>', content)
# print(ret)
#
# data = json.loads(ret[0])
# print(data)
# print(type(data))
#
# results = data["engine_search_result"]
# for result in results:
#     print(result)

def parse1(keyword, page):
    """Fetch one page of 51job search results for *keyword*.

    :param keyword: search term inserted into the list URL
    :param page: 1-based results page number
    :return: list of job dicts taken from the page's embedded
             ``window.__SEARCH_RESULT__`` JSON (``engine_search_result``)
    """
    # BUG FIX: the template uses positional '{}' placeholders, so the original
    # call .format(keyword=keyword, page=page) raised IndexError on every
    # request. Supply the arguments positionally instead.
    url = 'https://search.51job.com/list/040000,000000,0000,00,9,99,{},2,{}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(
        keyword,
        page,
    )
    req = ur.Request(
        url=url,
        headers={
            # Browser-like UA so the site serves the normal HTML page.
            'User-Agent': 'Mozilla/5.0.html (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.html.1271.64 Safari/537.11'
        }
    )

    # 51job pages are GBK-encoded; ignore any undecodable bytes.
    content = ur.urlopen(req).read().decode('gbk', 'ignore')
    # The result set is embedded as a JSON assignment inside a <script> tag.
    data = json.loads(
        re.findall('window.__SEARCH_RESULT__ =(.*?)</script>', content)[0]
    )
    results = data["engine_search_result"]
    return results

# contentb = ur.urlopen(url='https://jobs.51job.com/shenzhen-nsq/127541725.html?s=01&t=0').read()
# contentx = le.HTML(contentb)
# rets = contentx.xpath('//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()')
# print(rets)
# ret_data = '--'.join([
#     ret.strip() for ret in rets
# ])
# print(ret_data)
#
# parse2_job_detail = tool.xpath_union(contentx=contentx,path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()', split='\n',default=None)
# parse2_job_conn = tool.xpath_union(contentx=contentx,path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][2]//text()', split='\n',default=None)
# parse2_job_company = tool.xpath_union(contentx=contentx,path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][3]//text()', split='\n',default=None)

def parse2(url):
    """Scrape one job's detail page and extract its three content boxes.

    :param url: absolute URL of a 51job job-detail page
    :return: dict with keys ``parse2_job_detail``, ``parse2_job_conn`` and
             ``parse2_job_company`` — each the joined text of one
             ``tBorderTop_box`` section (via ``tool.xpath_union``), or the
             default ``None`` when the section is missing.
    """
    request = ur.Request(
        url=url,
        headers={
            'User-Agent': 'Mozilla/5.0.html (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.html.1271.64 Safari/537.11'
        }
    )
    page_bytes = ur.urlopen(request).read()
    tree = le.HTML(page_bytes)

    # The detail page lays out, in order: job description, contact
    # information, company profile — one bordered box each.
    detail = tool.xpath_union(contentx=tree,
                              path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()',
                              split='', default=None)
    contact = tool.xpath_union(contentx=tree,
                               path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][2]//text()',
                               split='', default=None)
    company = tool.xpath_union(contentx=tree,
                               path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][3]//text()',
                               split='', default=None)

    return {
        'parse2_job_detail': detail,
        'parse2_job_conn': contact,
        'parse2_job_company': company,
    }

def spider(keyword, c, start_page=1, end_page=100):
    """Crawl search pages ``start_page``..``end_page`` (inclusive) for
    *keyword*, enrich each listing with its detail page, and persist it.

    :param keyword: search term passed to parse1
    :param c: MongoDB collection-like object exposing ``insert_one``
    :param start_page: first results page to fetch (inclusive)
    :param end_page: last results page to fetch (inclusive)
    """
    for page in range(start_page, end_page + 1):
        parse1_datas = parse1(keyword=keyword, page=page)
        for parse1_data in parse1_datas:
            # Follow each listing's link and merge the detail fields
            # into the search-result record before storing it.
            job_href = parse1_data['job_href']
            parse2_data = parse2(url=job_href)
            parse1_data['parse2_job_detail'] = parse2_data['parse2_job_detail']
            parse1_data['parse2_job_conn'] = parse2_data['parse2_job_conn']
            parse1_data['parse2_job_company'] = parse2_data['parse2_job_company']
            # BUG FIX: was c.incert_one — an AttributeError on any pymongo
            # collection; the correct method is insert_one.
            c.insert_one(parse1_data)


if __name__ == '__main__':
    # Crawl pages 1-10 of 'java' listings and store the records in the
    # local MongoDB database 'db1', collection '51job'.
    client = pymongo.MongoClient()
    collection = client.get_database('db1').get_collection('51job')
    spider(keyword='java', c=collection, start_page=1, end_page=10)





    # parse1_datas = parse1(keyword='java',page=1)
    # for parse1_data in parse1_datas:
    #     job_href = parse1_data['job_href']
    #     parse2_data = parse2(url=job_href)
    #     parse1_data['parse2_job_detail'] = parse2_data['parse2_job_detail']
    #     parse1_data['parse2_job_conn'] = parse2_data['parse2_job_conn']
    #     parse1_data['parse2_job_company'] = parse2_data['parse2_job_company']

 # print(parse2(url='https://jobs.51job.com/shenzhen-nsq/127541725.html?s=01&t=0'))

