# -*- coding: utf-8 -*-
import scrapy
from Tencent.items import TencentItem
# Goal: scrape Tencent job postings — position name, detail link, category, headcount, work location, and publish time.

class TencentSpider(scrapy.Spider):
    """Spider for Tencent's job board: walks the paginated listing and
    yields one TencentItem per posting row (name, category, headcount,
    location, publish time).
    """
    name = 'tencent'
    # allowed_domains = ['tencent.com']
    # start_urls = ['http://tencent.com/']
    url = 'https://hr.tencent.com/position.php?&start='  # page offset is appended
    offset = 0          # current pagination offset, advanced in steps of 10
    start_urls = [url + str(offset)]

    def parse(self, response):
        """Parse one listing page and yield a TencentItem per row, then
        follow the next page while the offset cap has not been reached.

        :param response: scrapy Response for a listing page
        :yields: TencentItem instances, then (optionally) the next Request
        """
        rows = response.xpath('//tr[@class="even"] | //tr[@class="odd"]')
        for row in rows:
            item = TencentItem()
            # extract_first(default=...) instead of extract()[0]:
            # extract()[0] raises IndexError on a missing/empty cell and can
            # never return None, so the original `if positionType is None`
            # fallback was dead code — the default below realizes its intent.
            item['positionname'] = row.xpath('./td[1]/a/text()').extract_first(default='')
            # item['positionlink'] = row.xpath('./td[1]/a/@href').extract_first(default='')
            item['positionType'] = row.xpath('./td[2]/text()').extract_first(default='暂无类别')
            item['peopleNum'] = row.xpath('./td[3]/text()').extract_first(default='')
            item['workLocation'] = row.xpath('./td[4]/text()').extract_first(default='')
            item['publishTime'] = row.xpath('./td[5]/text()').extract_first(default='')
            yield item

        # Pagination fix: the next-page Request used to be yielded
        # unconditionally, so after the cap was reached the spider kept
        # re-requesting the offset-50 page forever (only the dupe filter
        # masked it). Yield the next Request only while below the cap.
        # NOTE(review): self.offset is shared class-level state; fine while
        # pages are crawled sequentially, but deriving the offset from
        # response.url would be safer under high concurrency — TODO confirm.
        if self.offset < 50:
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)

