# -*- coding: utf-8 -*-
import scrapy
from Tencent.items import TencentItem

class TencentSpider(scrapy.Spider):
    """Spider for Tencent HR job listings (hr.tencent.com).

    Crawls the paginated position table and yields one TencentItem per row,
    following the "next page" link until the last page is reached.
    """

    name = 'tencent'
    allowed_domains = ['tencent.com']
    base_url = 'https://hr.tencent.com/position.php?keywords=&lid=2218&tid=87&start={}'
    # Pre-build the first 9 listing pages; the site paginates 10 rows per
    # page, so start = 0, 10, ..., 80.
    url_list = [base_url.format(i * 10) for i in range(9)]

    start_urls = url_list

    def parse(self, response):
        """Parse one listing page: yield an item per table row, then follow pagination.

        :param response: scrapy Response for a position.php listing page.
        :yields: TencentItem per job row, plus one scrapy.Request for the next page.
        """
        # Data rows alternate between the "even" and "odd" CSS classes.
        tr_list = response.xpath('//tr[@class="even"] | //tr[@class="odd"]')

        for tr in tr_list:
            item = TencentItem()

            # Job title (td[1] may nest the text inside an <a>, hence //text()).
            item['work_name'] = tr.xpath('./td[1]//text()').extract_first()
            # Job category
            item['work_type'] = tr.xpath('./td[2]/text()').extract_first()
            # Headcount
            item['work_count'] = tr.xpath('./td[3]/text()').extract_first()
            # Location
            item['work_place'] = tr.xpath('./td[4]/text()').extract_first()
            # Publish date
            item['work_time'] = tr.xpath('./td[5]/text()').extract_first()

            yield item

            # Detail-page follow-up (intentionally disabled):
            # detail_url = 'https://hr.tencent.com/' + tr.xpath('./td[1]//@href').extract_first()
            # yield scrapy.Request(detail_url, callback=self.parse_detail)

        # Pagination — runs ONCE per page, after the row loop.
        # BUG FIX: this block previously ran inside the row loop and built the
        # URL before checking the guard, which (a) raised TypeError
        # ('str' + None) on the last page when the link is absent, and
        # (b) yielded a duplicate next-page request for every row.
        next_href = response.xpath('//a[text()="下一页"]/@href').extract_first()
        # 'javascript:;' marks the last page; None means no link was found.
        if next_href and next_href != 'javascript:;':
            # Hand the next listing page back to the engine; reuse this
            # callback since the page structure is identical.
            yield scrapy.Request('https://hr.tencent.com/' + next_href, callback=self.parse)

    def parse_detail(self, response):
        """Parse a job detail page into duty and requirement text lists.

        :param response: scrapy Response for a single position's detail page.
        :yields: one TencentItem carrying work_duty and work_requir lists.
        """
        detail_item = TencentItem()

        # Two <ul class="squareli"> blocks: [0] = duties, [1] = requirements.
        ul_list = response.xpath('//ul[@class="squareli"]')
        # Responsibilities
        detail_item['work_duty'] = ul_list[0].xpath('.//li/text()').extract()
        # Requirements
        detail_item['work_requir'] = ul_list[1].xpath('.//li/text()').extract()

        yield detail_item

































