# -*- coding: utf-8 -*-

# 1.  Use the Scrapy framework for crawling (10 points)
# 9.  Store the scraped data in a JSON file and display it (10 points)
# 10. The whole process must show running/debugging, and include comments (10 points)

import scrapy
from ..items import TengxunzhaopinItem


class ZhaopinSpider(scrapy.Spider):
    """Crawl Tencent HR job postings.

    Requests the first 10 listing pages, extracts per-row summary fields,
    then follows each posting's detail page to collect responsibilities
    and requirements, yielding one ``TengxunzhaopinItem`` per posting.
    """

    name = 'zhaopin'

    # allowed_domains = ['xxx']
    # start_urls = ['xxx']

    # 2.  Crawl the first 10 pages of listings (10 points)
    def start_requests(self):
        """Yield requests for listing pages at offsets 0, 10, ..., 90."""
        for offset in range(0, 100, 10):
            url = f'https://hr.tencent.com/position.php?&start={offset}#a'
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse one listing page and schedule a request per detail page.

        Summary fields are forwarded to ``parse_xq`` via request ``meta``.
        """
        # 3.  Use XPath for data parsing (10 points)
        # Job rows alternate between the 'even' and 'odd' CSS classes.
        rows = response.xpath("//tr[@class='even'] | //tr[@class='odd']")
        for tr in rows:
            # 4.  Extract position name and category (10 points)
            # extract_first() returns None (or the given default) instead of
            # raising IndexError on an empty selection, unlike [0].extract().
            mingcheng = tr.xpath("./td[@class='l square']/a/text()").extract_first()
            # The category cell may be empty; fall back to the placeholder '空'.
            leibie = tr.xpath("./td[2]/text()").extract_first('空')
            # 5.  Extract the number of openings for this position (10 points)
            renshu = tr.xpath("./td[3]/text()").extract_first()
            # 6.  Extract the location of this position (10 points)
            didian = tr.xpath("./td[4]/text()").extract_first()
            # 7.  Extract the posting date of this position (10 points)
            shijian = tr.xpath("./td[5]/text()").extract_first()
            link = tr.xpath("./td[@class='l square']/a/@href").extract_first()
            if not link:
                # Malformed row with no detail link: nothing to follow.
                continue
            href = 'https://hr.tencent.com/' + link
            yield scrapy.Request(
                url=href,
                callback=self.parse_xq,
                meta={'mingcheng': mingcheng, 'leibie': leibie,
                      'renshu': renshu, 'didian': didian,
                      'shijian': shijian, 'href': href},
            )

    # 8.  Enter the detail page and get job responsibilities and requirements (10 points)
    def parse_xq(self, response):
        """Parse a detail page and yield the completed item."""
        meta = response.meta
        # Debug output showing crawl progress (required by the assignment).
        print(meta['mingcheng'], meta['href'])
        # string() collapses all text inside the <ul> into one string.
        zhize = response.xpath(
            "string(//tr[@class='c'][1]/td[@class='l2']/ul[@class='squareli'])"
        ).extract_first()
        yaoqiu = response.xpath(
            "string(//tr[@class='c'][2]/td[@class='l2']/ul[@class='squareli'])"
        ).extract_first()
        item = TengxunzhaopinItem()
        # Copy the summary fields forwarded from the listing page.
        for key in ('mingcheng', 'leibie', 'renshu', 'didian', 'shijian', 'href'):
            item[key] = meta[key]
        item['zhize'] = zhize
        item['yaoqiu'] = yaoqiu
        yield item
