#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File Name: cnblogs_spider.py
@Author: LTisme
@Date: 2021/7/31 19:26
@Description: Scrapy选择器构建于lxml库之上，这意味着它们在速度和解析准确性上非常相似，用法也基本一样，除了下面多了个extract()
"""
import scrapy
from ..items import CnBlogSpiderItem
from scrapy.spiders.crawl import Rule, CrawlSpider
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders.feed import XMLFeedSpider


class CnBlogsSpider(scrapy.Spider):
    """Basic spider that walks the paginated post index of one cnblogs blog.

    ``parse`` extracts per-post metadata from each index page and follows
    every post URL; ``parse_body`` then completes the item with the image
    URLs found in the post body and yields it to the item pipelines.
    """

    name = "cnblogs"  # required: unique spider name
    allowed_domains = ["cnblogs.com"]  # optional: restrict the crawl to this domain
    start_urls = [  # required: initial URL(s)
        "http://www.cnblogs.com/qiyeboy/default.html?page=1"
    ]

    def parse(self, response, **kwargs):
        """Extract post metadata from an index page and follow each post.

        Yields one ``scrapy.Request`` per post (with the half-built item
        stashed in ``meta``) plus a request for the next index page when a
        "next page" link is present.
        """
        # Each post on the index page lives inside an element with class "day".
        posts = response.xpath(".//*[@class='day']")
        for post in posts:
            # extract_first() returns None for a missing node instead of the
            # IndexError that extract()[0] raised on malformed entries.
            url = post.xpath(".//*[@class='postTitle']/a/@href").extract_first()
            title = post.xpath(".//*[@class='postTitle']/a/span/text()").extract_first()
            time = post.xpath(".//*[@class='dayTitle']/a/text()").extract_first()
            content = post.xpath(".//*[@class='postCon']/div/text()").extract_first()
            if not url:
                # Without a URL there is nothing to follow; skip rather than crash.
                continue
            item = CnBlogSpiderItem(url=url, title=title, time=time, content=content)
            self.logger.debug("parse built item: %s", item)
            # Stash the partially-filled item in meta; parse_body finishes it.
            yield scrapy.Request(url=url, callback=self.parse_body, meta={"item": item})
        # After all posts on this page were yielded, follow the next page link.
        # (The regex's Chinese link text means "next page".)
        next_page = scrapy.Selector(response).re(r'<a href="(\S*)">\s*下一页\s*</a>')
        if next_page:
            yield scrapy.Request(url=next_page[0], callback=self.parse)

    def parse_body(self, response):
        """Complete the item stashed by ``parse`` with the post's image URLs."""
        item = response.meta["item"]  # item handed over from parse()
        item["image_urls"] = response.xpath(
            ".//*[@class='postBody']//img//@src").extract()
        yield item  # picked up by the configured item pipelines


class CnBlogsCrawlSpider(CrawlSpider):
    """CrawlSpider variant: pagination is handled declaratively by ``rules``.

    The link extractor follows every ``?page=N`` index URL and hands each
    index page to ``parse_item``; ``parse_body`` then completes each item
    with the image URLs from the post body.
    """

    name = "cnblogs_crawlspider"  # required: unique spider name
    allowed_domains = ["cnblogs.com"]  # optional: restrict the crawl to this domain
    start_urls = [  # required: initial URL(s)
        "http://www.cnblogs.com/qiyeboy/default.html?page=1"
    ]
    rules = (
        # Follow every paginated index URL and parse it with parse_item;
        # this replaces the manual "next page" extraction of CnBlogsSpider.
        Rule(LinkExtractor(allow=(r"/qiyeboy/default\.html\?page=\d{1,}",)),
             follow=True,
             callback="parse_item",
             ),
    )

    def parse_item(self, response, **kwargs):
        """Extract post metadata from an index page and follow each post."""
        # Each post on the index page lives inside an element with class "day".
        posts = response.xpath(".//*[@class='day']")
        for post in posts:
            # extract_first() returns None for a missing node instead of the
            # IndexError that extract()[0] raised on malformed entries.
            url = post.xpath(".//*[@class='postTitle']/a/@href").extract_first()
            title = post.xpath(".//*[@class='postTitle']/a/span/text()").extract_first()
            time = post.xpath(".//*[@class='dayTitle']/a/text()").extract_first()
            content = post.xpath(".//*[@class='postCon']/div/text()").extract_first()
            if not url:
                # Without a URL there is nothing to follow; skip rather than crash.
                continue
            item = CnBlogSpiderItem(url=url, title=title, time=time, content=content)
            self.logger.debug("parse_item built item: %s", item)
            # Stash the partially-filled item in meta; parse_body finishes it.
            yield scrapy.Request(url=url, callback=self.parse_body, meta={"item": item})

    def parse_body(self, response):
        """Complete the item stashed by ``parse_item`` with the post's image URLs."""
        item = response.meta["item"]  # item handed over from parse_item()
        item["image_urls"] = response.xpath(
            ".//*[@class='postBody']//img//@src").extract()
        yield item  # picked up by the configured item pipelines


class CnBlogsXmlSpider(XMLFeedSpider):
    """Feed spider that iterates the blog's RSS feed one ``<entry>`` at a time.

    ``parse_node`` is invoked once per entry and currently only logs the
    entry's id, title and summary; it yields no items.
    """

    name = "cnblogs_xmlspider"  # required: unique spider name
    allowed_domains = ["cnblogs.com"]
    start_urls = ["http://feed.cnblogs.com/blog/u/269038/rss"]
    iterator = "html"  # parse the feed with the lenient HTML iterator
    itertag = "entry"  # call parse_node once for each <entry> element

    def adapt_response(self, response):
        # Hook for pre-processing the feed before iteration; nothing to do here.
        return response

    def parse_node(self, response, selector):
        """Log the id, title and summary of a single feed entry.

        extract_first() returns None for a missing child tag instead of the
        IndexError that extract()[0] raised on incomplete entries.
        """
        entry_id = selector.xpath("id/text()").extract_first()
        title = selector.xpath("title/text()").extract_first()
        summary = selector.xpath("summary/text()").extract_first()
        self.logger.info("id=%s title=%s summary=%s", entry_id, title, summary)
