# -*- coding: utf-8 -*-
import scrapy
from Tencent.items import TencentItem
# Spider class for crawling Tencent HR job listings.
class TencentspiderSpider(scrapy.Spider):
    """Crawl Tencent HR job-posting list pages and yield one item per job row.

    Paginates by incrementing the ``start=`` query parameter in steps of 10
    up to a hard cap of 2840.
    """
    name = 'tencentSpider'  # spider name used by `scrapy crawl`
    allowed_domains = ['tencent.com']  # restrict the crawl to this domain

    # Pagination state: current offset and the base listing URL.
    offset = 0
    url = 'https://hr.tencent.com/position.php?&start='

    start_urls = [url + str(offset)]  # first page the spider fetches

    def parse(self, response):
        """Parse one listing page, yield a TencentItem per row, then paginate.

        :param response: the downloaded listing page.
        :yields: populated ``TencentItem`` instances, then one ``scrapy.Request``
                 for the next page while the offset cap has not been reached.
        """
        # Job rows alternate between class 'odd' and class 'even'.
        rows = response.xpath("//tr[@class='odd']|//tr[@class='even']")
        for each in rows:
            # BUGFIX: create a fresh item per row. The original reused a single
            # TencentItem instance, so every yielded item was the same object
            # whose fields were overwritten on each iteration.
            item = TencentItem()
            # BUGFIX: use relative XPaths ("./td[...]") anchored on the current
            # row. The original absolute "//tr[...]" expressions searched the
            # whole document and always returned the first row's values.
            # extract_first('') avoids an IndexError on rows with missing cells.
            item['职位名称'] = each.xpath("./td[@class='l square']/a/text()").extract_first('')
            item['链接'] = each.xpath("./td[@class='l square']/a/@href").extract_first('')
            item['职位类别'] = each.xpath("./td[2]/text()").extract_first('')
            item['人数'] = each.xpath("./td[3]/text()").extract_first('')
            item['地点'] = each.xpath("./td[4]/text()").extract_first('')
            item['发布时间'] = each.xpath("./td[5]/text()").extract_first('')
            yield item

        # BUGFIX: request the next page only while below the offset cap.
        # The original yielded the Request unconditionally, so once the cap
        # was reached it re-requested the same final page forever.
        if self.offset < 2840:
            self.offset += 10
            yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
