import copy
import json
import re

import scrapy

from a51job.items import A51JobItem


class A51jobSpider(scrapy.Spider):
    """Spider that scrapes "big data development" job listings from 51job.com.

    Walks the paginated search results, extracts the JSON payload embedded
    in each result page, and follows every job link; the populated item is
    emitted from the detail-page callback.
    """

    name = '51job'
    # Use the registrable domain: detail-page links live on other subdomains
    # (e.g. jobs.51job.com).  The original 'search.51job.com' made Scrapy's
    # OffsiteMiddleware silently drop every detail request -- dont_filter
    # only bypasses the dupe filter, not offsite filtering.
    allowed_domains = ['51job.com']
    # Search-result pages 1-19 (range(1, 20) yields 19 pages).  The encoded
    # path segment is the percent-encoded query for "big data development".
    start_urls = ['https://search.51job.com/list/000000,000000,0000,00,9,99,'
                  '%25E5%25A4%25A7%25E6%2595%25B0%25E6%258D%25AE%25E5%25BC%2580%25E5%258F%2591,2,{}.html?'
                  'lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99'
                  '&jobterm=99&companysize=99&ord_field=0'
                  '&dibiaoid=0&line=&welfare='.format(i) for i in range(1, 20)]

    # Pre-compiled pattern for the inline JSON blob 51job embeds in a
    # <script> tag.  The closing '}' consumed by the pattern is re-added
    # before json.loads.  re.S in case the payload spans multiple lines.
    _RESULT_RE = re.compile(r'window\.__SEARCH_RESULT__\s*=\s*(.+)}</script>', re.S)

    def parse(self, response):
        """Parse one search-result page and schedule a detail request per job.

        Yields one ``scrapy.Request`` per listing; the populated item
        travels in ``meta['item']`` and is emitted by :meth:`detailed`.
        """
        match = self._RESULT_RE.search(response.text)
        if match is None:
            # Layout changed or an anti-bot page was served: skip this page
            # instead of crashing with IndexError on findall(...)[0].
            self.logger.warning('No __SEARCH_RESULT__ JSON found on %s', response.url)
            return
        data = json.loads(match.group(1) + "}")  # restore the brace the pattern consumed
        for result in data.get("engine_jds") or []:  # guard a missing/None key
            # Fresh item per listing: no shared mutable state between the
            # concurrent detail requests, so no deepcopy is needed.
            item = A51JobItem()
            item['p_name'] = result['job_title']             # job title
            item['p_salary'] = result['providesalary_text']  # salary
            item['p_address'] = result['workarea_text']      # work location
            item['p_time'] = result['attribute_text'][1]     # required experience
            item['p_edu'] = result['attribute_text'][2]      # required education
            item['p_company'] = result['company_name']       # company name
            item["p_additional"] = result["jobwelf"]         # benefits / extras
            # dont_filter: the same job may appear on several search pages
            # but we still want every detail request scheduled.
            yield scrapy.Request(result['job_href'], meta={'item': item},
                                 callback=self.detailed, dont_filter=True)

    def detailed(self, response):
        """Emit the item carried through the request meta."""
        yield response.meta['item']



