import scrapy
import re, json #导包，正则表达式和 json
from a51job.items import A51JobItem,JobDescItem
class A51jobSpider(scrapy.Spider):
    """Spider for python job listings on 51job.com.

    The search-result data is embedded in an inline <script> tag as a JSON
    blob (``window.__SEARCH_RESULT__ = {...}``). This spider extracts that
    blob, yields one ``A51JobItem`` per listing, follows each listing's
    detail page for the full description, and paginates through all pages.
    """

    name = 'shells'
    # Use the registrable domain so detail-page requests (which go to other
    # subdomains such as jobs.51job.com) are not dropped by the offsite
    # middleware. dont_filter=True only bypasses the duplicate filter,
    # not offsite filtering.
    allowed_domains = ['51job.com']
    start_urls = [
        'https://search.51job.com/list/010000,000000,0000,00,9,99,python,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0']
    # %d placeholder is the 1-based page number.
    base_urls = 'https://search.51job.com/list/010000,000000,0000,00,9,99,python,2,%d.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0'

    def parse(self, response):
        """Parse one search-result page.

        Yields one ``A51JobItem`` per listing, a detail-page request per
        listing, and (from page 1 only) requests for the remaining pages.
        """
        # The listing data lives in an inline script, not in the HTML DOM.
        js = response.xpath(
            '//script[contains(@type,"text/javascript") and not(@src)]'
        ).extract_first()
        if not js or "__SEARCH_RESULT__" not in js:
            # Layout changed or we were served an anti-bot page; skip
            # instead of crashing on js.split(...)[1].
            self.logger.warning("no __SEARCH_RESULT__ payload on %s", response.url)
            return
        # Slice off the " = " prefix and the trailing "</script>" residue.
        raw = js.split("__SEARCH_RESULT__")[1][3:-9]
        data = json.loads(raw)  # JSON text -> python dict

        for company in data.get("engine_jds") or []:
            item = A51JobItem()  # fresh container per listing (must stay inside the loop)
            item["p_salary"] = company.get("providesalary_text")  # salary
            item["p_address"] = company.get("workarea_text")  # work location
            # attribute_text is a variable-length list (location, experience,
            # education, ...); guard against short or missing lists.
            attrs = company.get("attribute_text") or []
            item["p_edu"] = attrs[2] if len(attrs) > 2 else None  # education
            item["p_time"] = attrs[1] if len(attrs) > 1 else None  # experience
            item["p_company"] = company.get("company_name")  # company name
            item["p_additional"] = company.get("jobwelf")  # perks / welfare
            item["job_href"] = company.get("job_href")  # detail-page url
            yield item
            if item["job_href"]:  # some entries lack a detail url
                yield scrapy.Request(
                    url=item["job_href"],
                    callback=self.parse_job_detail,
                    dont_filter=True,
                )

        # Only page 1 schedules the rest, so each page is requested once.
        current_page = int(data.get('curr_page') or 0)
        if current_page == 1:
            total_page = int(data.get('total_page') or 1)
            # range() end is exclusive: +1 so the last page is not skipped.
            for page in range(2, total_page + 1):
                yield scrapy.Request(
                    url=self.base_urls % page,
                    callback=self.parse,
                    dont_filter=True,
                )

    def parse_job_detail(self, response):
        """Parse a job detail page and yield its description text.

        The description appears either as direct <p> children or nested in
        <div><span> elements, depending on the page template.
        """
        result = response.xpath(
            '//div[contains(@class,"bmsg job_msg inbox")]/p/text()'
        ).extract() or response.xpath(
            "//div[contains(@class, 'bmsg job_msg inbox')]/div/span/text()"
        ).extract()
        job = JobDescItem()
        job["content"] = ";".join(result)
        yield job


