# -*- coding: utf-8 -*-
import re
from copy import deepcopy

import scrapy
from scrapy import Request

from webspider.dict.area_dict import dict  # NOTE(review): shadows the builtin `dict`; consider renaming the export
from webspider.items import WebspiderItem
# Targets a selected set of regions and job/function categories.

class A51jobSpider(scrapy.Spider):
    """Crawl 51job.com search listings for selected regions and IT job categories.

    For every (region, function-code) pair a search URL is requested; each
    result page yields one ``WebspiderItem`` per job row and a request for
    the next page until the last page is reached.
    """
    name = '51job'
    allowed_domains = ['51job.com']

    def start_requests(self):
        """Generate the initial search-page request for every region/job pair."""
        # 51job region codes (presumably Beijing, Shanghai, ... — the trailing
        # '01' does not match the 6-digit pattern; TODO confirm it is intentional).
        area_list = ['010000', '020000', '030200', '040000', '070200', '070300', '180200', '060000', '090200', '01']
        # area_list = ['000000']  # nationwide search
        # ft_list = {'python','java','.NET','Ruby','Go','PHP','C/C++'}
        # 51job function-type code -> human-readable job name (carried in meta).
        ft = {
            "0100": "后端开发",
            "0106": "高级软件工程师",
            "0107": "软件工程师",
            "0121": "Java开发工程师",
            "0120": "PHP开发工程师",
            "0122": "C/C++开发工程师",
            "0124": "Python开发工程师",
            "0126": ".NET开发工程师",
            "0151": "Ruby开发工程师",
            "0152": "Go开发工程师",
            "0130": "大数据开发工程师",
            "0129": "Hadoop工程师",
            "0131": "爬虫开发工程师",
            "0132": "脚本开发工程师",
            "0133": "多媒体开发工程师",
            "0117": "ERP技术开发",
            "0128": "区块链开发",
            "0143": "系统架构设计师",
            "0123": "系统分析员",
            "0149": "技术文员/助理",
            "0150": "技术文档工程师",
            "0142": "其他"
        }
        for area in area_list:
            for job in ft:
                # URL shape: list/{area},000000,{function},...,{page}.html — page 1.
                url = 'https://search.51job.com/list/{},000000,{},00,9,99,%2520,2,1.html'.format(area, job)
                yield Request(
                    url,
                    callback=self.parse,
                    meta={'key_word': ft[job]},
                )

    def parse(self, response):
        """Parse one search-result page: yield job items, then follow pagination."""
        # Skip the first 4 'el' divs — presumably the table header rows, not
        # job listings (TODO confirm against the live page layout).
        job_list = response.xpath("//div[@class='el']")[4:]
        for job in job_list:
            item = WebspiderItem()
            item['Key_word'] = response.meta.get('key_word')
            # BUGFIX: the original predicate "@class='t1 ' or 't1 tg1'" was
            # always true, because 't1 tg1' is a non-empty (truthy) string
            # literal in XPath. Match the class attribute properly instead.
            item["positionName"] = job.xpath("./p[contains(@class, 't1')]/span/a/@title").extract_first()
            item["companyFullName"] = job.xpath("./span[@class='t2']/a/@title").extract_first()
            item["salary"] = job.xpath("./span[@class='t4']/text()").extract_first()
            item["location"] = job.xpath("./span[@class='t3']/text()").extract_first()
            item["time"] = job.xpath("./span[@class='t5']/text()").extract_first()
            item['url'] = job.xpath("./p[contains(@class, 't1')]/span/a/@href").extract_first()
            yield item
        # Pagination — a CrawlSpider rule could also express this.
        try:
            # The pagination widget exposes total pages and current page as the
            # first two <input> values; extract the node-set once.
            page_values = response.xpath("//div[@class='dw_page']//input/@value").extract()
            page_count = int(page_values[0])
            current_page = int(page_values[1])
        except (IndexError, ValueError, TypeError):
            # BUGFIX: was a bare `except: print("error")` that swallowed every
            # exception; narrow it and surface the failure through the logger.
            self.logger.warning("pagination info not found on %s", response.url)
            return
        if current_page < page_count:
            next_url = response.url.replace("1.html", "{}.html".format(current_page + 1))
            self.logger.debug("following next page %s", next_url)
            yield scrapy.Request(
                next_url,
                callback=self.parse,
                # BUGFIX: the original dropped meta here, so every item from
                # page 2 onward had Key_word == None.
                meta={'key_word': response.meta.get('key_word')},
            )
