# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from ..items import LywItem


class LywSpiderSpider(CrawlSpider):
    """Crawl lieyunwang.com's latest-news listing and scrape article pages.

    Pagination pages (/latest/pN.html) are followed to discover article
    links; article pages (/archives/<id>) are parsed into LywItem objects.
    """

    name = 'lyw_spider'
    allowed_domains = ['lieyunwang.com']
    start_urls = ['https://www.lieyunwang.com/latest/p1.html']

    rules = (
        # Listing/pagination pages: follow to find more links, no callback.
        Rule(LinkExtractor(allow=r'/latest/p\d+\.html'), follow=True),
        # Article detail pages: handled by parse_detail, links not followed.
        Rule(LinkExtractor(allow=r'/archives/\d+'), callback='parse_detail', follow=False),
        # LinkExtractor defines which URLs to extract from a page;
        # Rule defines how each URL is processed (follow the links on it
        # and/or pass the response to a callback).
    )

    def parse_detail(self, response):
        """Extract one article page into a LywItem.

        :param response: scrapy Response for an /archives/<id> page.
        :return: LywItem with title, pub_time, author, content and origin URL.
        """
        # The <h1> may contain several text nodes; join then strip.
        title = ''.join(
            response.xpath('//h1[@class="lyw-article-title"]/text()').getall()
        ).strip()

        # .get() returns None when the node is missing, so guard before
        # stripping; strip for consistency with title/content.
        pub_time = response.xpath('//h1[@class="lyw-article-title"]/span/text()').get()
        if pub_time:
            pub_time = pub_time.strip()

        author = response.xpath('//a[@class="author-name open_reporter_box"]/text()').get()
        if author:
            author = author.strip()

        # Article body is spread over nested nodes; collect all text.
        content = ''.join(
            response.xpath('//div[@id="main-text-id"]//text()').getall()
        ).strip()

        return LywItem(
            title=title,
            pub_time=pub_time,
            author=author,
            content=content,
            origin=response.url,
        )