# -*- coding: utf-8 -*-
import logging

import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from example.items import ExampleItem


class NeteaseSpider(CrawlSpider):
    """Crawl tech.163.com and scrape July-2016 tech articles into ExampleItems."""

    name = "netease"
    allowed_domains = ["tech.163.com"]
    start_urls = (
        'http://tech.163.com/',
    )
    rules = (
        # Follow any link whose URL matches the July-2016 article pattern
        # (e.g. http://tech.163.com/16/0712/...) and hand the page to
        # parse_content. NOTE(review): the pattern is an unanchored regex —
        # the '.' characters match any character, which appears intentional
        # here but is looser than a literal-dot match; confirm if tightening
        # with re.escape-style dots is desired.
        Rule(LinkExtractor(allow=('http://tech.163.com/16/07[0-9][0-9]/.',)),
             callback='parse_content', follow=True),
    )

    def parse_content(self, response):
        """Extract title, url, publication time and body text from one article page.

        Returns an ExampleItem with:
          - title:   text of the <title> element ("" if absent)
          - url:     the response URL
          - time:    first 19 characters of the stripped post_time_source text,
                     i.e. a "YYYY-MM-DD HH:MM:SS" prefix ("" if absent)
          - content: list of paragraph text nodes under div#endText
        """
        item = ExampleItem()
        # extract_first(default="") returns "" instead of raising IndexError
        # when a page lacks the node, so one malformed page cannot abort the crawl.
        title = response.xpath("//html/head/title/text()").extract_first(default="")
        item["title"] = title
        # logging.WARNING == 30, so this preserves the original log level.
        self.log("Title is : %s" % title, logging.WARNING)
        item["url"] = response.url
        # The publication time lives in the post_time_source div; keep only
        # the leading timestamp portion (19 chars).
        posted = response.xpath(
            "//div[@class='post_time_source']/text()").extract_first(default="")
        item["time"] = posted.strip()[:19]
        # Extracting per-paragraph text nodes filters out nested markup that
        # a whole-div extract would drag along.
        item["content"] = response.xpath("//div[@id='endText']/p/text()").extract()
        return item
