import re
import scrapy
import logging

from urllib import parse
from scrapy import Request

from ArticleSpider.items import CnblogsItem

logger = logging.getLogger(__name__)

"""
博库原新闻爬取
"""


class CnBlogsSpider(scrapy.Spider):
    """Spider that crawls news articles from news.cnblogs.com.

    Walks the paginated news list (https://news.cnblogs.com/n/page/<N>/),
    follows every entry to its detail page
    (https://news.cnblogs.com/n/<id>/) and yields one ``CnblogsItem``
    per article.
    """

    name = 'cnblogs'
    allowed_domains = ['news.cnblogs.com']
    start_urls = ['http://news.cnblogs.com/']

    def parse(self, response):
        """Parse one news list page.

        Yields a ``Request`` per article (handled by ``load_news_block``)
        and, when a numeric next page exists, a ``Request`` for it
        (handled by ``parse`` again).
        """
        page_num = response.meta.get("pageNum", 1)

        # Each news entry is a div.news_block under #news_list.
        news_block_list = response.xpath("//div[@id='news_list']/div[@class='news_block']")
        for news_block in news_block_list:
            # extract_first() returns None instead of raising IndexError
            # when the markup changes and the <a> is missing.
            news_block_href = news_block.xpath("div[2]/h2/a/@href").extract_first()
            if not news_block_href:
                continue
            news_block_url = parse.urljoin(response.url, news_block_href)
            yield Request(url=news_block_url,
                          meta={"pageNum": page_num, "articleUrl": news_block_url},
                          callback=self.load_news_block)

        # Pagination: the sibling link right after the "current" one is the
        # next page number. On the last numbered page that sibling is the
        # "Next >" link, whose text is not a digit string, so we stop there.
        current_page = response.xpath("//div[@class='pager']/a[contains(@class, 'current')]")
        if current_page:
            next_page = current_page.xpath("following-sibling::a[1]")
            if next_page:
                next_page_text = (next_page.xpath("text()").extract_first() or "").strip()
                if next_page_text.isdigit():
                    next_page_url = parse.urljoin(response.url, "/n/page/{}".format(next_page_text))
                    # Keep pageNum an int so its type is consistent with the
                    # default of 1 used on the first page.
                    yield Request(url=next_page_url,
                                  meta={"pageNum": int(next_page_text)},
                                  callback=self.parse)

    def load_news_block(self, response):
        """Parse one article detail page and yield a populated CnblogsItem."""
        page_num = response.meta.get("pageNum", None)
        article_url = response.meta.get("articleUrl", None)
        # Lazy %-style args: the message is only formatted when INFO is enabled.
        logger.info("正在爬取第 %s 页，文章链接：%s", page_num, article_url)

        # extract_first(default=...) keeps the item pipeline alive when a
        # field is missing instead of crashing the callback with IndexError.
        title = response.xpath("//div[@id='news_title']/a/text()").extract_first(default="")
        content = response.xpath("//div[@id='news_content']").extract_first(default="")
        create_time = response.xpath("//div[@id='news_info']/span[@class='time']/text()").extract_first(default="")
        # Drop the non-numeric prefix (e.g. "发布于 ") before the timestamp.
        match_re = re.match(r".*?(\d+.*)", create_time)
        if match_re:
            create_time = match_re.group(1)

        item = CnblogsItem()
        item['article_url'] = article_url
        item['title'] = title
        item['content'] = content
        item['create_time'] = create_time
        yield item
