# -*- coding: utf-8 -*-
import scrapy
from pyquery import PyQuery as pq
from job51.settings import logger
from job51.items import jobList, jobDetail
import time, re, random
from job51.other.models import session, jobs, job_detail

class A51jopSpider(scrapy.Spider):
    """Spider for 51job.com search results.

    Starts from a fixed search URL (PHP jobs, area code 030200), yields one
    ``jobList`` item per result row, follows each posting to yield a
    ``jobDetail`` item, and paginates via the "next page" link until the site
    stops providing one.
    """
    name = '51job'
    allowed_domains = ['www.51job.com', 'search.51job.com', 'jobs.51job.com']
    start_urls = ['https://search.51job.com/list/030200,000000,0000,00,9,99,php,2,1.html?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=4&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=']

    # Extracts the page number from a list URL such as
    # ".../list/030200,...,php,2,7.html" -> "7".
    # Raw string (the original '\.'  was an invalid escape) and compiled once.
    _PAGE_RE = re.compile(r'.*,(.*?)\.html', re.S)

    def parse(self, response):
        """Parse one search-result page.

        Yields a ``jobList`` item per row, schedules a detail-page request for
        rows not already stored in ``job_detail``, then follows pagination.

        :param response: Scrapy response for a search-result list page.
        """
        doc = pq(response.text)
        # :gt(0) skips the table header row; each remaining .el is one job.
        rows = doc('#resultList .el:gt(0)').items()
        page = response.meta.get('page', 1)

        for row in rows:
            job_list_item = jobList()
            job_list_item['station'] = row.find('.t1 span a').text()  # e.g. "python工程师"
            job_list_item['r_jobid'] = row.find('input[name="delivery_jobid"]').val()
            job_list_item['job_detail_url'] = row.find('.t1 span a').attr('href')
            job_list_item['company'] = row.find('.t2 a').text()
            job_list_item['address'] = row.find('.t3').text()
            job_list_item['salary'] = row.find('.t4').text()
            # The site shows only month-day; prepend the current year.
            job_list_item['release_time'] = time.strftime('%Y') + '-' + row.find('.t5').text()
            job_list_item['page'] = page
            yield job_list_item

            # Skip detail pages that are already persisted in the DB.
            existing = session.query(job_detail).filter_by(
                r_jobid=job_list_item['r_jobid']).first()
            if getattr(existing, 'r_jobid', None):
                logger.info('详情页已存在...')
                continue
            yield scrapy.Request(
                url=job_list_item['job_detail_url'],
                callback=self.job_detail_parse,
                meta={'r_jobid': job_list_item['r_jobid']},
            )

        # Follow pagination.  On the last page attr('href') returns None; the
        # original code crashed there ('next_page' + None -> TypeError), so we
        # stop cleanly instead.  (The old `if i < 300000:` guard was dead code:
        # `i` was never incremented and the condition was always true.)
        next_page = doc('.dw_page .bk').eq(1).children('a').attr('href')
        if not next_page:
            logger.info('no next page link; pagination finished')
            return
        logger.info('next_page' + next_page)
        matches = self._PAGE_RE.findall(next_page)
        # Fall back to the current page value if the URL shape is unexpected,
        # rather than raising IndexError on matches[0].
        current_page_num = matches[0] if matches else page
        logger.info('正在爬取第' + str(current_page_num) + '页')
        # Short random pause between list pages to avoid hammering the site.
        time.sleep(random.randint(1, 2))

        yield scrapy.Request(url=next_page, callback=self.parse,
                             meta={'page': current_page_num})

    def job_detail_parse(self, response):
        """Parse a single job-posting page into a ``jobDetail`` item.

        :param response: Scrapy response for one posting; ``response.meta``
            carries the ``r_jobid`` set by :meth:`parse`.
        """
        doc = pq(response.text)
        detail_item = jobDetail()
        detail_item['station'] = doc('.cn').children('h1').attr('title')
        detail_item['company'] = doc('.cn .cname a:eq(0)').text()
        detail_item['r_jobid'] = response.meta['r_jobid']

        # Metadata line, e.g.: "广州-越秀区 | 3-4年经验 | 招1人 | 08-06发布"
        # (sometimes with extra trailing fields).  Guard every index so a
        # short/malformed line yields defaults instead of IndexError.
        msg_1 = doc('.msg.ltype').text().replace('  ', '').split('|')
        detail_item['address'] = msg_1[0] if msg_1 else ''
        detail_item['experience'] = msg_1[1] if len(msg_1) > 1 else ''
        detail_item['xueli'] = msg_1[2] if len(msg_1) > 2 else ''
        # People-count field is only meaningful when longer than 3 characters.
        detail_item['people_num'] = (
            msg_1[3] if len(msg_1) > 3 and len(msg_1[3]) > 3 else 0
        )
        release_time = msg_1[4] if len(msg_1) > 4 else '00'
        detail_item['release_time'] = time.strftime('%Y') + '-' + release_time

        detail_item['tags'] = doc('.jtag').text()  # e.g. "五险一金 员工旅游 餐饮补贴 周末双休"
        detail_item['job_info'] = doc('.bmsg.job_msg.inbox').text()
        detail_item['contact_way'] = doc('.tCompany_main .tBorderTop_box:eq(1) .bmsg.inbox .fp').text()
        detail_item['company_info'] = doc('.tCompany_main .tBorderTop_box:eq(2)').text()
        yield detail_item
        # Short random pause between detail requests.
        time.sleep(random.randint(1, 2))
