# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class TenSpider(CrawlSpider):
    """Crawl Sina news pages whose URLs match 'headline'.

    CrawlSpider workflow:
      1. Automatically extracts links from each response according to the
         configured ``rules``.
      2. Automatically sends requests for the extracted links and passes
         each response to the rule's callback for parsing.
    """
    name = 'ten'
    # BUG FIX: was ['sina.com'], which does NOT match hosts like
    # news.sina.com.cn (the start URL's domain), so the offsite middleware
    # dropped every extracted link and nothing was crawled.
    allowed_domains = ['sina.com.cn']
    start_urls = ['https://news.sina.com.cn/']

    # Rule parameters:
    #   LinkExtractor(allow=...) -- regex that an extracted URL must match
    #   callback                 -- name of the method that parses the response
    #   follow                   -- whether to keep extracting links from the
    #                               callback's responses (used for pagination);
    #                               False here because the callback already
    #                               handles the data, so we do not follow on.
    rules = (
        Rule(LinkExtractor(allow='headline'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        """Handle one matched headline page: just print its URL."""
        print(response.url)



# class TenSpider(CrawlSpider):
#     name = 'ten2'
#     allowed_domains = ['tencent.com']
#     start_urls = ['https://hr.tencent.com/position.php?keywords=python&lid=0&tid=0']
#
#     # Extraction rules
#     rules = (
#
#         # 1. Automatically extract matching list-page URLs, send the
#         #    list-page requests, and get the responses (follow=True).
#         Rule(LinkExtractor(allow="start"), follow=True),
#         # 2. Automatically extract matching detail-page URLs, send the
#         #    detail-page requests, and get the responses.
#         Rule(LinkExtractor(allow="position_detail"), callback='parse_detail', follow=False),
#
#     )
#
#     # Parse function -- extracts the data from a detail page.
#     def parse_detail(self, response):
#         dict_data = {}
#         dict_data['work_name'] = response.xpath('//*[@id="sharetitle"]/text()').extract_first()
#         dict_data['work_place'] = response.xpath(
#             '//*[@id="position_detail"]/div/table/tr[2]/td[1]/text()').extract_first()
#
#         # item flows: spider -> engine -> pipeline
#         return dict_data



























