import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import re

class PartySpider(CrawlSpider):
    """Crawl Douban Shenzhen weekly events and scrape each event's title/date.

    CrawlSpider follows links matched by ``rules``; event-detail pages are
    handed to :meth:`parse_item`, pagination links are followed without a
    callback.
    """
    name = 'party'
    allowed_domains = ['douban.com']
    start_urls = ['https://www.douban.com/location/shenzhen/events/week-all']

    # URL-extraction rules. ``rules`` is a tuple, so order matters.
    rules = (
        # LinkExtractor pulls URLs from the response; the response for each
        # extracted URL is passed to ``callback`` (omit it for pages that
        # need no parsing). ``follow`` controls whether the response of an
        # extracted URL is itself run through ``rules`` again.
        # NOTE: dots are escaped so '.' matches a literal dot, not any char.
        Rule(LinkExtractor(allow=r'https://www\.douban\.com/event/\d+/'), callback='parse_item'),
        # Pagination links: no callback needed, but keep following them.
        # ``follow`` must be the boolean True, not the string "True" — a
        # non-empty string is truthy by accident and hides intent.
        Rule(LinkExtractor(allow=r'\?start=\d+'), follow=True),
    )

    # Do not name this ``parse``: CrawlSpider reserves ``parse`` internally.
    def parse_item(self, response):
        """Extract title and date strings from one event-detail page.

        :param response: scrapy Response for an event page.
        :yields: dict with ``title`` (str or None) and ``date`` (list[str]).
        """
        item = {}
        item["title"] = response.xpath("//h1[@itemprop='summary']/text()").extract_first()
        item["date"] = response.xpath("//li[@class='calendar-str-item']/text()").extract()
        # Yield (not print) so the item flows through pipelines/feed exports.
        yield item