from typing import ContextManager
import scrapy
import re
import sys
sys.path.append("/Users/zxkkk/gitrepo/spider/searchBlogs")
from searchBlogs.items import SearchblogsItem
class BlogsSpider(scrapy.Spider):
    """Crawl news listings from news.cnblogs.com, yielding one item per article."""
    name = 'searchBlogs'
    allowed_domains = ['cnblogs.com']
    start_urls = ['https://news.cnblogs.com/']

    def parse(self, response):
        """Parse one listing page.

        Yields a ``SearchblogsItem`` per article (title, absolute link,
        whitespace-stripped summary, picture URL, date), then follows the
        pager's "next" link back into this callback.
        """
        for block in response.xpath('//div[@class="news_block"]'):
            article_item = SearchblogsItem()
            article_item['title'] = block.xpath('.//h2[@class="news_entry"]/a/text()').get()
            # The listing stores a relative href; resolve it against the page
            # URL (also keeps the scheme consistent with start_urls, unlike
            # the old hard-coded "http://news.cnblogs.com" + href concat,
            # which crashed with TypeError when href was missing).
            half_link = block.xpath('.//h2[@class="news_entry"]/a/@href').get()
            article_item['link'] = response.urljoin(half_link) if half_link else None
            # BUG FIX: the original assigned item['content'] INSIDE the loop
            # with an accumulator that was never updated, so only the last
            # text fragment survived. Join every fragment, squeezing out all
            # whitespace (spaces/newlines/tabs) from each piece.
            fragments = block.xpath('.//div[@class="entry_summary"]/text()').getall()
            article_item['content'] = "".join("".join(piece.split()) for piece in fragments)
            pic_url = block.xpath('.//div[@class="entry_summary"]/a/img/@src').get(default="")
            # Legacy image paths on these markers lack a scheme; prefix one.
            if any(marker in pic_url for marker in ('images0', 'images2015', 'images2017')):
                article_item['pic'] = "https:" + pic_url
            else:
                article_item['pic'] = pic_url
            article_item['date'] = block.xpath('.//div[@class="entry_footer"]/span[@class="gray"]/text()').get()
            yield article_item
        # Follow pagination: the last anchor in the pager is the "next" link.
        next_link = response.xpath("//div[@class='pager']/a[last()]/@href").get()
        if next_link:
            yield scrapy.Request(response.urljoin(next_link), callback=self.parse, dont_filter=True)
    
        
    
            
