# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.http import Request,TextResponse
from qcwuyou.items import QcwuyouItem
import json
import logging
class N1Spider(scrapy.Spider):
    """Spider that crawls job postings from 51job.com.

    Command-line arguments (passed as ``scrapy crawl n1 -a key=value``):

    savepath : str, optional
        Output file path for the scraped data (defaults to ``job.csv``).
    cate_data : str, optional
        Path to the JSON file holding the job-category tree
        (defaults to ``all_cate.json``).
    """
    name = 'n1'
    allowed_domains = ['51job.com']

    # Listing-page URL template:
    # {province},000000,{category},00,9,99,%2520,2,{page}.html
    url2 = "https://search.51job.com/list/{},000000,{},00,9,99,%2520,2,{}.html"

    def __init__(self, savepath=None, cate_data="all_cate.json", *args, **kwargs):
        super(N1Spider, self).__init__(*args, **kwargs)
        # Fall back to a default file name when -a savepath=... was not given.
        if savepath is None:
            self.log("没有在命令行指定，使用默认的文件名保存", logging.WARN)
            self.savepath = "job.csv"
        else:
            self.log("输出的文件路径:" + savepath, logging.WARN)
            self.savepath = savepath

        self.cate_data = cate_data
        self.log("分类数据：" + cate_data, logging.WARN)

    def start_requests(self):
        """Yield one first-page listing request per (province, category) pair."""
        # 23 provinces, 5 autonomous regions, HK/Macao/Taiwan, 4 municipalities;
        # Shenzhen ('040000') is queried separately from Guangdong, so it is
        # included explicitly.
        provinces = ['010000','020000','040000','050000','060000','030000','070000','080000','090000','100000','110000','120000','130000','140000','150000','160000','170000','180000','190000','200000','210000','220000','230000','240000','250000','260000','270000','280000','290000','300000','310000','320000','330000','340000','350000']

        # Read and parse the category JSON once (it is loop-invariant),
        # not once per province as before.
        with open(self.cate_data, "r", encoding="UTF-8") as f:
            data = json.loads(f.read())
        # By default only the computing/IT industry branch is crawled.
        data = data['计算机/互联网/通信/电子']

        for province in provinces:
            for cate1, cates in data.items():
                for cate in cates:
                    cate_id = cate['id']
                    label = cate['label']
                    yield Request(
                        self.url2.format(province, cate_id, 1),
                        callback=self.next1,
                        meta={
                            # top-level category label (e.g. backend development)
                            'cate1': cate1,
                            # category id used in the search URL
                            "id": cate_id,
                            # human-readable sub-category label
                            "label": label,
                            "province": province
                        })

    # 1. Parse the total page count of a category, then schedule every page.
    def next1(self, response):
        """Read hidTotalPage from page 1 and fan out to all listing pages."""
        data = response.text

        cate_id = response.meta['id']
        cate1 = response.meta['cate1']
        label = response.meta['label']
        province = response.meta['province']

        pages = re.findall('id="hidTotalPage" value="(.*?)"', data)
        if not pages:
            # Layout changed or an anti-bot page was served: record and skip
            # instead of raising IndexError on pages[0].
            self.crawler.stats.inc_value("总页数解析失败")
            return
        total = int(pages[0])
        # Log how many pages this category has.
        self.log("{}-{}-{}-{}\t\t{}".format(cate1, label, province, cate_id, str(total)), logging.WARN)

        # This response IS page 1 — extract its job links directly.
        # Re-requesting the same URL would be dropped by Scrapy's dupe filter,
        # which previously lost every page-1 job link.
        for req in self.next2(response):
            yield req

        # Pages run 1..total inclusive; the old range(1, total) silently
        # dropped the last page.
        for i in range(2, total + 1):
            url = self.url2.format(province, cate_id, i)
            yield Request(url, callback=self.next2, meta=response.meta)

    # 2. Collect every job-detail URL from one listing page.
    def next2(self, response):
        """Extract detail-page links and schedule them for parsing."""
        data = response.text
        urls = re.findall('href="(.*?)"  onmousedown="">', data)  # job-detail links
        for url in urls:
            self.log("爬取工作详情:" + url, logging.DEBUG)
            yield Request(url, callback=self.parse, meta=response.meta)

    # 3. Parse a single job-detail page.
    def parse(self, response: TextResponse):
        """Scrape one job posting into a QcwuyouItem, or drop it with a stat."""
        items = QcwuyouItem()
        data = response.text

        try:
            # Job title; commas normalized so the CSV output stays intact.
            items['name'] = re.findall('<h1 title="(.*?)">', data)[0].replace(",", "，")
            # Salary.
            items['salary'] = re.findall('</h1><strong>(.*?)</strong>', data)[0]
            # Company name.
            items['cname'] = re.findall('class="catn">(.*?)<em', data)[0]

            # Job requirements line, e.g.:
            # 广州-天河区  |  1年经验  |  大专  |  招1人  |  03-13发布
            infos = response.xpath('//p[@class="msg ltype"]/text()').extract()
            # Only keep postings with at least the five expected fields.
            if len(infos) >= 5:
                items['city'] = infos[0].replace("\xa0", "")      # location
                items['exp'] = infos[1].replace("\xa0", "")       # experience
                items['edu'] = infos[2].replace("\xa0", "")       # education
                items['num'] = infos[3].replace("\xa0", "")       # headcount
                items['pubtime'] = infos[4].replace("\xa0", "")   # publish date
            else:
                # Discard and count it in the stats.
                self.crawler.stats.inc_value("工作要求错误的")
                return

            # string(.) also handles pages like
            # https://jobs.51job.com/guangzhou-thq/120066373.html?s=01&t=0
            # that the previous regex-based extraction could not;
            # `or ""` guards against a missing node returning None.
            t1 = response.xpath('//div[@class="bmsg job_msg inbox"]').xpath("string(.)").extract_first() or ""
            # Job description, flattened to a single CSV-safe line.
            items['detail'] = t1.replace(" ", "").replace("\r", "").replace("\n", "").replace(",", "，")
            # Company type.
            items['ctype'] = response.xpath('//p[1][@class="at"]/@title').extract()[0]
            # Company size.
            items['cnum'] = response.xpath('//p[2][@class="at"]/@title').extract()[0]
        except IndexError:
            # Any selector that matched nothing means the page layout is
            # unexpected: count it and drop the item instead of crashing.
            self.crawler.stats.inc_value("详情页解析失败")
            return

        # Company industry.
        items['ctrade'] = "|".join(response.xpath("//span[@class='i_trade']").xpath("parent::node()/a/text()").extract())

        # Category labels carried through the request meta.
        items['cate1'] = response.meta['cate1']
        items['cate2'] = response.meta['label']
        # Source URL.
        items['url'] = response.url
        # Benefits / welfare tags.
        jtag = "|".join(response.xpath("//div[@class='jtag']/div/span/text()").extract())
        items['welfare'] = jtag

        yield items

    # Called by Scrapy when the crawl finishes.
    def closed(self, reason):
        """Print final crawl statistics (including the drop counters)."""
        print("爬取完成..")
        print(self.crawler.stats.get_stats())
        self.log("closed.....")


