from runoob.items import RunoobItem
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class PythonTutorialSpider(CrawlSpider):
    """Crawl the runoob.com Python 3 tutorial mirror.

    Starts at the tutorial index page and follows every link under the
    ``manual/pythontutorial3/docs/html/`` directory, yielding one
    ``RunoobItem`` (page title + raw article HTML) per page.
    """

    name = 'python_tutorial'
    allowed_domains = ['runoob.com']
    start_urls = ['https://www.runoob.com/manual/pythontutorial3/docs/html/index.html']

    # NOTE: the Rule callback must NOT be named 'parse'. CrawlSpider uses
    # its own parse() internally to drive the rules, so overriding parse()
    # (as the original code did) silently disables rule-based crawling.
    rules = (
        Rule(
            # LinkExtractor matches with re.search, so an escaped URL
            # prefix is enough; the original r'...html/+' misused '+'
            # (it quantified the slash, not the rest of the path).
            LinkExtractor(allow=r'https://www\.runoob\.com/manual/pythontutorial3/docs/html/'),
            callback='parse_item',
            follow=False,
        ),
    )

    def parse_item(self, response):
        """Extract the page title and article HTML from one tutorial page.

        :param response: the downloaded page response
        :yields: a single ``RunoobItem`` with
            * ``name``    -- text of the first ``<h1>`` inside
              ``<div class="document">`` (may be ``None`` if the page has
              no such heading)
            * ``content`` -- the full HTML of ``<div class="document">``
        """
        # Page title (HTML heading).
        name = response.xpath('//div[@class="document"]/div/h1/text()').get()

        # Crawl log: which page produced which title (lazy %-formatting,
        # idiomatic for Scrapy's per-spider logger instead of print()).
        self.logger.info('scraped %s -> %s', response.url, name)

        # Raw HTML of the article body.
        content = response.xpath('//div[@class="document"]').get()

        yield RunoobItem(name=name, content=content)
