import pymongo
import ssl
ssl._create_default_https_context = ssl._create_unverified_context

import urllib.request as ur
import user_agent
import re
import json
import lxml.etree as le
import tool_here
import time;
# '''
# https://search.51job.com/list/020000,000000,0000,00,9,99,python,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare=
# '''

def parse1(keyword, page):
    """Fetch one page of 51job search results for *keyword*.

    Parameters:
        keyword: search term substituted into the list URL.
        page: 1-based result page number.

    Returns:
        The list stored under ``engine_search_result`` inside the page's
        embedded ``window.__SEARCH_RESULT__`` JSON blob.

    Raises:
        ValueError: if the expected JSON blob is not found in the page
            (e.g. the site layout changed or the request was blocked).
    """
    headers = {
        # BUG FIX: the HTTP header name uses a hyphen. The original key
        # "User_Agent" was sent as a bogus "User_agent" header, so the
        # browser user agent was never actually applied.
        "User-Agent": 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1',
    }
    url = 'https://search.51job.com/list/020000,000000,0000,00,9,99,{keyword},2,{page}.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='.format(keyword = keyword,page = page)

    request = ur.Request(url = url, headers=headers)
    # The site serves GBK-encoded pages; undecodable bytes are dropped.
    content = ur.urlopen(request).read().decode('gbk','ignore')
    #content = get_proxy_handler().open(request).read().decode('gbk','ignore')
    # BUG FIX: re.findall(...)[0] raised an opaque IndexError when the
    # marker was missing; fail with an explicit, diagnosable error instead.
    match = re.search(r'window\.__SEARCH_RESULT__ = (.*?)</script>', content)
    if match is None:
        raise ValueError('window.__SEARCH_RESULT__ JSON not found in page')
    jsonData = json.loads(match.group(1))
    return jsonData['engine_search_result']


def parseDetail(url):
    """Fetch a single job-posting page and extract its detail sections.

    Parameters:
        url: absolute URL of the job posting (``job_href`` from parse1).

    Returns:
        dict with keys ``parse_job_detail``, ``parse_job_conn`` and
        ``parse_jon_company`` (sic — the misspelled key is kept because
        spider() reads it verbatim); each value is the joined text of the
        corresponding page section, or None when the section is empty.
    """
    headers = {
        # BUG FIX: hyphen, not underscore — "User_Agent" was not sent as
        # a real User-Agent header.
        "User-Agent": user_agent.get_user_agent_pc()
    }
    request = ur.Request(url = url, headers=headers)
    content = ur.urlopen(request).read()
    #content = get_proxy_handler().open(request).read()
    print("getContent details")
    contextx = le.HTML(content)
    job_detail = xpath_union(contentx = contextx, path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][1]//text()', split='', default=None)
    job_conn = xpath_union(contentx = contextx, path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][2]//text()', split='', default=None)
    # NOTE(review): this XPath is identical to job_conn's — company info
    # probably lives in box [3]; confirm against a live page before changing.
    job_company = xpath_union(contentx = contextx, path='//div[@class="tCompany_main"]/div[@class="tBorderTop_box"][2]//text()', split='', default=None)
    return dict(
        parse_job_detail = job_detail,
        parse_job_conn = job_conn,
        parse_jon_company = job_company,
    )

def get_proxy_handler():
    """Fetch a fresh proxy address from the data5u API and wrap it in an opener.

    Returns:
        An ``OpenerDirector`` that routes HTTP requests through the fetched
        proxy.  (Despite the name, this returns an opener, not a handler;
        the name is kept for caller compatibility.)
    """
    api_url = 'http://api.ip.data5u.com/dynamic/get.html?order=9acd9a923863c711318f83cfd9061057&random=1&sep=3'
    proxyAddress = ur.urlopen(api_url).read().decode('utf-8')
    print(proxyAddress)
    handler = ur.ProxyHandler({'http': proxyAddress})
    return ur.build_opener(handler)

def spider(keyword, start_page = 1, end_page = 100):
    """Crawl 51job search pages for *keyword* and store postings in MongoDB.

    For each result row, fetches the posting's detail page via parseDetail()
    and merges the extracted sections into the row before inserting the
    whole page batch into the module-level ``collection``.

    Parameters:
        keyword: search term passed through to parse1().
        start_page: first page to crawl (inclusive), default 1.
        end_page: last page to crawl (inclusive), default 100.

    NOTE: relies on the module-level ``collection`` being initialised
    (done in the ``__main__`` guard) before this is called.
    """
    mongodata = []
    for page in range(start_page, end_page+1):
        print("loading page {} .... ".format(page))
        parseData = parse1(keyword=keyword,page=page)
        for data in parseData:
            job_href = data['job_href']
            print("loading page {} .... ".format(job_href))
            detail_data = parseDetail(url=job_href)
            data['parse_job_detail'] = detail_data['parse_job_detail']
            data['parse_job_conn'] = detail_data['parse_job_conn']
            data['parse_jon_company'] = detail_data['parse_jon_company']
            mongodata.append(data)
            print("******** Finish work {} .... ".format(job_href))
        # BUG FIX: insert_many([]) raises pymongo.errors.InvalidOperation,
        # which would abort the whole crawl on a page with no results.
        if mongodata:
            collection.insert_many(mongodata)
            mongodata.clear()



# 返回唯一的xpath结果
# Return the first match for *path*, or *default* when nothing matches.
def xpath_one(contentx, path, default=None):
    matches = contentx.xpath(path)
    if matches:
        return matches[0]
    return default


# 返回多个xpath的结果
# Return every match for *path* (possibly an empty list).
def xpath_all(contentx, path):
    return contentx.xpath(path)


def xpath_union(contentx, path, split='', default=None):
    """Join the stripped text matches for *path* with *split*.

    Returns *default* when the joined result is empty.
    """
    pieces = (piece.strip() for piece in contentx.xpath(path))
    joined = split.join(pieces)
    return joined or default

if __name__ == '__main__':
    # NOTE(review): credentials are hard-coded in source — move them to
    # environment variables or a config file before sharing this script.
    # (Fixed typo: local was named "clinet".)
    client = pymongo.MongoClient(
        'localhost:27017',
        username="ian",
        password="secretPassword",
        authSource='cool_db',
        authMechanism='SCRAM-SHA-1',
    )
    db = client.get_database('cool_db')
    # spider() reads this module-level collection.
    collection = db.get_collection('51job')
    spider("java", start_page=4, end_page=20)