# -*- coding: utf-8 -*-
import scrapy
import re
import datetime
from scrapy.http.request import Request
from urllib import parse
from ..items import ArticleItem
from ..utils.common import get_md5


class JobboleSpider(scrapy.Spider):
    """Spider for blog.jobbole.com.

    Walks the paginated "all posts" listing, follows each article link,
    and yields one populated ``ArticleItem`` per article (title, creation
    date, vote/bookmark/comment counts, tags, cover-image URL).
    """
    name = 'jobbole_old'
    allowed_domains = ['blog.jobbole.com']
    # Seed URL: the full article listing (first page).
    # start_urls = ['http://blog.jobbole.com/']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    @staticmethod
    def _first_int(text, default=0):
        """Return the first run of digits in *text* as an int.

        Falls back to *default* when *text* is None/empty or contains no
        digits.  Using ``re.search(r"\\d+", ...)`` captures the whole
        number; the previous ``re.match(".*(\\d+).*", ...)`` pattern only
        captured the last digit because the leading greedy ``.*``
        swallowed the rest.
        """
        match = re.search(r"\d+", text or "")
        return int(match.group()) if match else default

    def parse(self, response):
        """Parse one listing page.

        1. Extract every article URL on the page and schedule a Request
           whose callback is :meth:`parse_detail` for field extraction.
        2. Follow the "next page" link (if any) back into :meth:`parse`.

        Downloading is asynchronous (Scrapy sits on top of Twisted), so
        work is handed off via ``yield Request(...)`` rather than fetched
        inline.
        """
        post_nodes = response.css("#archive .post.floated-thumb .post-thumb a")
        for post_node in post_nodes:
            # The thumbnail URL rides along in meta so parse_detail can
            # attach it; keeping it separate supports both CDN-hosted and
            # locally stored images downstream.
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            # urljoin handles both absolute and relative hrefs.
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={"front_image_url": image_url},
                          callback=self.parse_detail)

        # Pagination: follow the "next" link when present.
        next_url = response.css(".next.page-numbers::attr(href)").extract_first("")
        if next_url:
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """Extract all article fields from a detail page.

        Yields one ``ArticleItem`` which Scrapy routes to the pipelines
        configured in ``ITEM_PIPELINES``.  All selector extractions use a
        ``""`` default so a missing node degrades to an empty/zero field
        instead of raising ``TypeError`` on ``None``.
        """
        article_item = ArticleItem()

        # ---------------- title ----------------
        title = response.css(".entry-header h1::text").extract_first("")
        print("标题{}".format(title))

        # ---------------- creation date ----------------
        # Strip surrounding whitespace/newlines and the trailing " ·"
        # separator the site appends after the date.
        create_time = response.css('.entry-meta-hide-on-mobile::text') \
            .extract_first("").strip().replace(" ·", "")
        print("创建日期{}".format(create_time))

        # ---------------- vote (praise) count ----------------
        praise_num = self._first_int(
            response.xpath('//span[contains(@class,"vote-post-up")]/h10/text()')
            .extract_first(""))
        print("点赞数{}".format(praise_num))

        # ---------------- bookmark (collect) count ----------------
        collect_num = self._first_int(
            response.xpath('//span[contains(@class,"bookmark-btn")]/text()')
            .extract_first(""))
        print("收藏量{}".format(collect_num))

        # ---------------- comment count ----------------
        # _first_int returns an int here too, so the item field type is
        # consistent with collect_num (the old code stored a str on match).
        comment_num = self._first_int(
            response.xpath('//a[@href="#article-comment"]/span/text()')
            .extract_first(""))
        print("评论数{}".format(comment_num))

        # ---------------- tags ----------------
        tag_list = response.xpath(
            "//p[@class='entry-meta-hide-on-mobile']/a/text()").extract()
        # The comment-count pseudo-tag ("N 评论") shares this markup;
        # filter it out before joining.
        tag_list = [element for element in tag_list
                    if not element.strip().endswith("评论")]
        tags = ",".join(tag_list)
        print("标签{}".format(tags))

        # ---------------- cover image (forwarded from listing page) ----------------
        from_image_url = response.meta.get("front_image_url", "")

        # Populate the item.
        article_item["title"] = title
        article_item["url"] = response.url
        article_item["url_object_id"] = get_md5(response.url)
        # Normalize to a date object for DB storage; fall back to today
        # when the page's date format is unexpected.
        try:
            create_date = datetime.datetime.strptime(create_time, "%Y/%m/%d").date()
        except ValueError:
            create_date = datetime.datetime.now().date()
        article_item["create_time"] = create_date
        article_item["praise_num"] = praise_num
        article_item["collect_num"] = collect_num
        article_item["comment_num"] = comment_num
        article_item["from_image_url"] = [from_image_url]
        article_item["tags"] = tags

        # Routed to pipelines (requires ITEM_PIPELINES in settings).
        yield article_item
