import urllib.request as ur
import user_agent
import re
import json
import lxml.etree as le
import tool
import pymongo

def job(keyword, page):
    '''Fetch one page of 51job search results for *keyword*.

    The listing page is client-side rendered: the actual result data is
    embedded in an inline <script> as a JSON blob assigned to
    ``window.__SEARCH_RESULT__``, so we decode the HTML and pull the JSON
    out with a regex instead of parsing the DOM.

    Args:
        keyword: job title / search keyword (interpolated into the URL).
        page: 1-based page number of the result listing.

    Returns:
        list: the ``engine_search_result`` entries (dicts) from the page's
        embedded JSON; an empty list when no payload is found (e.g. the
        request was blocked or the page layout changed).
    '''
    # Build the search URL; only keyword and page vary.
    url = 'https://search.51job.com/list/010000,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(
            keyword = keyword,
            page = page
    )

    request = ur.Request(
        url = url,
        headers = {
            'User-Agent': user_agent.get_user_agent_pc(),
            'Cookie': 'guid=cce9c37b54b95d8f2ab615543f62b701; slife=lowbrowser%3Dnot%26%7C%26lastlogindate%3D20200914%26%7C%26securetime%3DDDAEMQdhUTUFZwA2DDINYVVlCjs%253D; track=registertype%3D2; _ujz=MTc5MDc1OTE0MA%3D%3D; ps=needv%3D0; 51job=cuid%3D179075914%26%7C%26cusername%3Demail_20200914_d2aa8232%26%7C%26cpassword%3D%26%7C%26cname%3D%26%7C%26cemail%3D2429800426%2540qq.com%26%7C%26cemailstatus%3D0%26%7C%26cnickname%3D%26%7C%26ccry%3D.0Myn3tABiys2%26%7C%26cconfirmkey%3D24TFUint%252F0oYg%26%7C%26cautologin%3D1%26%7C%26cenglish%3D0%26%7C%26sex%3D%26%7C%26cnamekey%3D24.sl07DRXAys%26%7C%26to%3D3e15bc42c2c729de311472ae88f798195f5f5fa6%26%7C%26; adv=adsnew%3D0%26%7C%26adsnum%3D1173456%26%7C%26adsresume%3D1%26%7C%26adsfrom%3Dhttps%253A%252F%252Fwww.so.com%252Flink%253Fm%253Db4fpaJjcQeayWFLyQorrUIN9yJP7hMr%25252BHs0vWkowux%25252F0QwynPqgEBvoWHSHwfigihtT0vC0XVGQp02QsSex85Ry08fQOfgt7%25252FAbbGYDZzaSXnjSjTY4hEHAy9qmkZHf119H5UCDyTWnDMQezeYKOF8BVklPWU6qLGQrM9uAIy1hIsfjYnVtUGOpzrHbbwwTThFB6czGsAyzwvjWOjBZfbgf8Z%25252BuXxBtzj3uHYkErAp6aDiLN02cG3LF8%25252BXJQB9gmOO48LRQiyWHxaSgVu3MPbZq9t2h%25252FF4owqwRTq1Ln%25252FUM4%25253D%26%7C%26ad_logid_url%3D0%26%7C%26; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60010000%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60010000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FApython%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; __guid=115568644.4473767506151209000.1600085775068.8374; monitor_count=3'
        }
    )

    # 51job serves GBK-encoded pages; ignore undecodable bytes rather
    # than crashing on stray characters.
    response = ur.urlopen(request).read().decode('gbk', 'ignore')

    # Raw string with escaped dots so the literal variable name is matched
    # (the original pattern's bare '.' matched any character).
    match = re.search(r"window\.__SEARCH_RESULT__ = (.*?)</script>", response)
    if match is None:
        # No embedded payload: page layout changed or the request was
        # blocked. Return an empty result set instead of raising IndexError.
        return []

    data = json.loads(match.group(1))
    return data.get('engine_search_result', [])

def job_details(url):
    '''Fetch a single job posting page and extract its three text sections.

    Args:
        url: absolute URL of the job posting (the ``job_href`` field from
            a search result).

    Returns:
        dict: keys ``job_detail`` (requirements/description), ``job_conn``
        (contact information) and ``job_company`` (company profile), each
        the space-joined text of the corresponding ``tBorderTop_box`` div,
        or the ``default`` value from ``tool.xpath_union`` when missing.
    '''
    request = ur.Request(
            url = url,
            headers = {
                'User-Agent': user_agent.get_user_agent_pc(),
                'Cookie': 'guid=cce9c37b54b95d8f2ab615543f62b701; slife=lowbrowser%3Dnot%26%7C%26lastlogindate%3D20200914%26%7C%26securetime%3DDDAEMQdhUTUFZwA2DDINYVVlCjs%253D; track=registertype%3D2; _ujz=MTc5MDc1OTE0MA%3D%3D; ps=needv%3D0; 51job=cuid%3D179075914%26%7C%26cusername%3Demail_20200914_d2aa8232%26%7C%26cpassword%3D%26%7C%26cname%3D%26%7C%26cemail%3D2429800426%2540qq.com%26%7C%26cemailstatus%3D0%26%7C%26cnickname%3D%26%7C%26ccry%3D.0Myn3tABiys2%26%7C%26cconfirmkey%3D24TFUint%252F0oYg%26%7C%26cautologin%3D1%26%7C%26cenglish%3D0%26%7C%26sex%3D%26%7C%26cnamekey%3D24.sl07DRXAys%26%7C%26to%3D3e15bc42c2c729de311472ae88f798195f5f5fa6%26%7C%26; adv=adsnew%3D0%26%7C%26adsnum%3D1173456%26%7C%26adsresume%3D1%26%7C%26adsfrom%3Dhttps%253A%252F%252Fwww.so.com%252Flink%253Fm%253Db4fpaJjcQeayWFLyQorrUIN9yJP7hMr%25252BHs0vWkowux%25252F0QwynPqgEBvoWHSHwfigihtT0vC0XVGQp02QsSex85Ry08fQOfgt7%25252FAbbGYDZzaSXnjSjTY4hEHAy9qmkZHf119H5UCDyTWnDMQezeYKOF8BVklPWU6qLGQrM9uAIy1hIsfjYnVtUGOpzrHbbwwTThFB6czGsAyzwvjWOjBZfbgf8Z%25252BuXxBtzj3uHYkErAp6aDiLN02cG3LF8%25252BXJQB9gmOO48LRQiyWHxaSgVu3MPbZq9t2h%25252FF4owqwRTq1Ln%25252FUM4%25253D%26%7C%26ad_logid_url%3D0%26%7C%26; nsearch=jobarea%3D%26%7C%26ord_field%3D%26%7C%26recentSearch0%3D%26%7C%26recentSearch1%3D%26%7C%26recentSearch2%3D%26%7C%26recentSearch3%3D%26%7C%26recentSearch4%3D%26%7C%26collapse_expansion%3D; search=jobarea%7E%60010000%7C%21ord_field%7E%600%7C%21recentSearch0%7E%60010000%A1%FB%A1%FA000000%A1%FB%A1%FA0000%A1%FB%A1%FA00%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA99%A1%FB%A1%FA9%A1%FB%A1%FA99%A1%FB%A1%FA%A1%FB%A1%FA0%A1%FB%A1%FApython%A1%FB%A1%FA2%A1%FB%A1%FA1%7C%21; __guid=115568644.4473767506151209000.1600085775068.8374; monitor_count=3'
            }
    )
    content = ur.urlopen(request).read()

    # Parse the document once and reuse the tree for all three XPath
    # extractions (the original re-parsed the same bytes three times).
    tree = le.HTML(content)
    # The three tBorderTop_box divs hold, in order: job requirements,
    # contact information, and the company profile.
    xpath_tpl = "//div[@class = 'tCompany_main']//div[@class = 'tBorderTop_box'][{}]//text()"
    job_detail = tool.xpath_union(tree, xpath_tpl.format(1), split = ' ', default = None)
    job_conn = tool.xpath_union(tree, xpath_tpl.format(2), split = ' ', default = None)
    job_company = tool.xpath_union(tree, xpath_tpl.format(3), split = ' ', default = None)
    return dict(
        job_detail = job_detail,
        job_conn = job_conn,
        job_company = job_company
    )

def spider(keyword, c, start_page = 1, end_page = 100):
    '''Crawl search result pages [start_page, end_page] for *keyword*,
    enrich each listing with its detail page, and insert into MongoDB.

    Args:
        keyword: job search keyword passed to :func:`job`.
        c: a pymongo collection; each enriched listing is inserted via
            ``insert_one``.
        start_page: first result page to crawl (inclusive, default 1).
        end_page: last result page to crawl (inclusive, default 100).
    '''
    # BUG FIX: the original iterated over the 2-tuple
    # ``(start_page, end_page + 1)`` — crawling only those two page
    # numbers — instead of the inclusive range of pages.
    for page in range(start_page, end_page + 1):
        for data in job(keyword, page):
            # Fetch the posting's detail page and merge its sections
            # into the search-result record before storing it.
            res = job_details(data['job_href'])
            data['job_detail'] = res['job_detail']
            data['job_conn'] = res['job_conn']
            data['job_company'] = res['job_company']
            c.insert_one(data)


if __name__ == '__main__':
    # Connect to the local MongoDB instance and select the target
    # collection ('Java' in database 'homework') for storing results.
    mongo_client = pymongo.MongoClient()
    collection = mongo_client.get_database('homework').get_collection('Java')

    # Ask the user which keyword and page range to crawl.
    keyword = input("请输入搜索岗位：")
    first_page = int(input("请输入搜索起始页："))
    last_page = int(input("请输入搜索终止页："))

    spider(keyword = keyword, c = collection, start_page = first_page, end_page = last_page)
