import hashlib
import time
from urllib import parse

import scrapy
from scrapy import Request

# scrapy 是异步IO框架，没有多线程，也没有引入消息队列，通过yield 丢出去服务去处理
from blogs.items import BlogsItem, BlogsItemLoader


class BlogSpiderSpider(scrapy.Spider):
    """Crawl the cnblogs news listing (news.cnblogs.com).

    Yields one ``BlogsItem`` per entry on each listing page and follows the
    "Next >" pagination link until no such link exists.
    """

    name = 'blog_spider'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['https://news.cnblogs.com/']

    def parse(self, response):
        """Parse a listing page.

        Extracts every ``div.news_block`` into a ``BlogsItem`` via the
        project's ItemLoader, then schedules the next listing page if a
        "Next >" link is present.
        """
        # Keep these as Selector objects (no extract()) so the ItemLoader
        # can run its own XPath extraction scoped to each news block.
        news_list = response.xpath('.//*[@id="news_list"]//div[@class="news_block"]')
        for news in news_list:
            # ItemLoader rooted at this news block via selector=news.
            item_loader = BlogsItemLoader(item=BlogsItem(), selector=news)
            item_loader.add_xpath("title", './/div[2]/h2/a/text()')
            item_loader.add_xpath("img_urls", './/div[2]/div[1]/a/img/@src')
            item_loader.add_xpath("union_code", './/div[2]/h2/a/@href')
            # add_value() injects a literal (the crawl timestamp here).
            item_loader.add_value("time", time.time())
            blog_item = item_loader.load_item()
            # Hand the item off to the configured pipelines.
            yield blog_item

        # Pagination: a[contains(text(),"Next >")] selects the next-page
        # anchor regardless of its position among the pager links.
        next_url = response.xpath(
            './/*[@id="sideleft"]/div[5]/a[contains(text(),"Next >")]/@href'
        ).extract_first()
        # BUG FIX: extract_first() returns None on the last page; the old
        # `"Next >" in next_page` membership test raised TypeError there.
        if next_url:
            print("即将跳转到url:", next_url)
            # BUG FIX: follow the next LISTING page with self.parse — the old
            # callback=self.parse_detail only printed the page title, so
            # crawling stopped after one pagination hop.
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """Parse an article detail page (currently only logs its <title>)."""
        title = response.xpath('/html/head/title/text()').extract_first()
        print("title:", title)
