# 51job (前程无忧, 51job.com) job-listing spider: collects job detail
# URLs per category, parses each posting, and builds MySQL insert rows.
import requests
from parsel import Selector
from concurrent import futures
import pymysql
def crawl_list(href):
    """Fetch one job-listing page and collect the job detail links.

    Appends every detail-page URL found on the page to the module-level
    ``job_info_href`` list (defined under ``__main__``).

    :param href: URL of a 51job category listing page.
    :returns: None — results accumulate in ``job_info_href``.
    """
    # The original wrapped this in `while True: ...; break`, a dead-code
    # loop that ran exactly once; a straight-line body is equivalent.
    res_html = requests.get(href).text
    sel = Selector(text=res_html)
    job_info_href.extend(sel.xpath('//p[@class="t1 "]/span/a/@href').extract())
    # Pagination hook (disabled — only the first page is crawled):
    # next_href = sel.xpath('//li[@class="bk"]/a/@href').extract_first()
    # if next_href:
    #     crawl_list(next_href)

def crawl_info(info_href):
    """Fetch and parse one job detail page, then build its insert SQL.

    Parses title, salary range, location, experience, education, headcount
    and publish date, and prints a parameterized INSERT for table
    ``zw_list``.  Jobs without salary information, or whose page layout
    yields fewer than five info fields, are skipped.

    :param info_href: URL of a 51job (or its "sc" sub-site) job detail page.
    :returns: None.
    """
    print(info_href)
    res_html = requests.get(info_href).text
    sel = Selector(text=res_html)
    zw_info_dict = {}
    # Title: regular pages use div.cn/h1@title, the "sc" sub-site uses
    # div.detail-left/p text.
    zw_info_dict['zw_name'] = sel.xpath(
        '//div[@class="cn"]/h1/@title|//div[@class="detail-left"]/p/text()'
    ).extract_first()
    salary_text = sel.xpath('//div[@class="cn"]/strong/text()').extract_first()
    if not salary_text:
        # No salary shown — skip this posting entirely.
        return
    # e.g. "1-1.5千/月" -> ["1", "1.5"].  NOTE: str.strip removes any of the
    # characters 千 / 月 from both ends (a character set, not a suffix
    # string) — assumed adequate for the formats seen on the site.
    parts = salary_text.strip('千/月').split('-')
    if len(parts) > 1:
        zw_info_dict['zw_xz_l'] = parts[0]  # lower bound
        zw_info_dict['zw_xz_h'] = parts[1]  # upper bound
    else:
        # Single salary value: use it for both bounds.  (BUG FIX: the
        # original assigned zw_xz_l twice and never set zw_xz_h, so the
        # SQL formatting below raised KeyError for these pages.)
        zw_info_dict['zw_xz_l'] = zw_info_dict['zw_xz_h'] = parts[0]
    if 'sc' in info_href:
        # BUG FIX: .extract() was missing, so the loop iterated Selector
        # objects (which have no .strip()) instead of strings.
        zw_infos = [text.strip() for text in
                    sel.xpath('//dl[@class="job-require"]/dd/text()').extract()]
    else:
        zw_infos = [text.strip() for text in
                    sel.xpath('//p[@class="msg ltype"]/@title').extract_first().split('|')]
    if len(zw_infos) < 5:
        # Unexpected page layout — don't index past the end of the list.
        return
    zw_info_dict['zw_dd'] = zw_infos[0]            # location
    zw_info_dict['zw_gzjy'] = zw_infos[1]          # work experience
    zw_info_dict['zw_xl'] = zw_infos[2]            # education level
    zw_info_dict['zw_zprs'] = zw_infos[3].strip('招人')  # headcount, e.g. "招2人" -> "2"
    zw_info_dict['zw_fbrq'] = zw_infos[4].strip('发布')  # publish date
    # Parameterized query instead of str.format: scraped text may contain
    # quotes, which would break — or inject into — a string-built SQL.
    sql = ('insert into zw_list values(NULL,%(zw_name)s,%(zw_xz_h)s,'
           '%(zw_xz_l)s,%(zw_dd)s,%(zw_gzjy)s,%(zw_xl)s,'
           '%(zw_zprs)s,%(zw_fbrq)s)')
    print(sql, zw_info_dict)
    # cursor.execute(sql, zw_info_dict)

if __name__ == '__main__':
    url = 'https://www.51job.com/'
    # Shared accumulator: crawl_list() appends job detail URLs here.
    job_info_href = []
    # MySQL connection used for persisting rows (crawl_info currently
    # only prints the SQL; its execute call is commented out).
    conn = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                           passwd="root", db="spider", charset="utf8")
    conn.autocommit(True)
    cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
    # Scrape the category links from the 51job home page.
    home_sel = Selector(text=requests.get(url).text)
    category_href = home_sel.xpath('//div[@class="cn hlist"]/div[@class="e"]/a/@href').extract()
    # Crawl only the first category for now.
    crawl_list(category_href[0])
    # with futures.ThreadPoolExecutor(50) as executor:
    #     executor.map(crawl_list, category_href)
    for detail_url in job_info_href:
        crawl_info(detail_url)
    # with futures.ThreadPoolExecutor(50) as executor:
    #     executor.map(crawl_info, job_info_href)
    cursor.close()
    conn.close()

