# -*- coding: utf-8 -*-
# Reference: http://blog.csdn.net/u012150179/article/details/34913315 plus personal notes
"""
@ CrawlSpider 是Spider的派生类，多了一个rules参数，定义提取的动作。
@ LinkExtract 链接提取器
"""
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.selector import Selector

from wenzhen.items import XywyItem


class XywytwoSpider(CrawlSpider):
    """Crawl xywy.com consultation list pages and collect question links.

    The ``rules`` below make CrawlSpider follow pagination links whose URL
    matches ``list_640_all``; every followed page is handed to
    :meth:`parse_item`.
    """

    name = "xywy"
    allowed_domains = ["xywy.com"]
    start_urls = (
        'http://club.xywy.com/list_640_all_1.htm',
    )

    # allow: iterable of regexes a URL must match to be extracted
    #        (empty -> every link matches).
    # restrict_xpaths: XPath region(s) to search for links; combined with
    #        ``allow`` to filter candidates.
    # NOTE: both must be tuples/lists -- the original ``(r'...')`` was a
    # bare string (missing trailing comma), not a one-element tuple.
    rules = [
        Rule(
            LinkExtractor(
                allow=(r'list_640_all',),
                restrict_xpaths=(
                    '//div[@class="clearfix pageStyle tc mt20 pb20 f12 pagelink"]'
                    '/a[last()-1]',
                ),
            ),
            callback='parse_item',
            follow=True,
        ),
    ]

    def parse_item(self, response):
        """Extract question-detail URLs from one list page.

        Returns an ``XywyItem`` whose ``link`` field holds the list of hrefs
        found in the second anchor of each table row's first cell.
        """
        item = XywyItem()
        # Response objects expose .xpath() directly; wrapping the response
        # in Selector(...) was redundant.
        item['link'] = response.xpath(
            '//table[@class="f12 kstable"]/tr/td[1]/a[2]/@href'
        ).extract()
        return item

