# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import TenxunzhaopingItem
from w3lib.html import remove_tags
import re


class TencentSpider(CrawlSpider):
    """Crawl Tencent HR (hr.tencent.com) job listings and yield one item per posting.

    LinkExtractor keyword reference (kept from the original author's notes):
        allow:  only URLs matching this regex are extracted (empty = match all).
        deny:   URLs matching this regex are never extracted.
        allow_domains / deny_domains: domain whitelist / blacklist for links.
        restrict_xpaths: XPath expressions that narrow where links are taken from.
    """

    name = 'tencent'
    allowed_domains = ['tencent.com']
    start_urls = ['http://hr.tencent.com/']

    rules = (
        # Social-recruitment listing pages: keep following.
        # NOTE: tags/attrs must be iterables of strings; the original passed
        # bare strings ('a', 'href'), which only worked via scrapy's lenient
        # arg_to_iter — use proper one-element tuples.
        Rule(LinkExtractor(allow=r'social', tags=('a',), attrs=('href',)),
             follow=True),
        # Pagination / "more" listing pages: keep following.
        Rule(LinkExtractor(allow=r'position\.php', tags=('a',), attrs=('href',)),
             follow=True),
        # Job detail pages: parse them, do not follow further.
        Rule(LinkExtractor(allow=r'position_detail\.php', tags=('a',), attrs=('href',)),
             callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Extract a single job posting from a detail page.

        Yields a TenxunzhaopingItem with the page url, job title, location,
        position type, headcount and the concatenated duty description.
        """
        item = TenxunzhaopingItem()
        item['url'] = response.url
        # The first <td> text node on the detail page is the job title.
        item['title'] = response.xpath('//td/text()').extract()[0]

        # The rows with class "c" hold location / type / headcount in order.
        info = response.css('tr.c td::text').extract()
        item['location'] = info[0]
        item['ptype'] = info[1]
        # Headcount reads like "2人" — strip the trailing CJK counter character.
        item['number'] = info[2].strip('人')

        # Duty description is a <ul> of bullet points; join them into one
        # string with all whitespace removed (''.join avoids the quadratic
        # string-concatenation loop of the original).
        duty = response.xpath(
            '//*[@id="position_detail"]/div/table/tr/td/ul/li/text()').extract()
        item['duty'] = ''.join(line.strip().replace(' ', '') for line in duty)

        yield item


















