# -*- coding: utf-8 -*-
import re
from datetime import datetime
import scrapy
from urllib import parse
from scrapy.http import  Request
from ArticleSpider.items import JobboleArticleItem

from ArticleSpider.utils.common import get_md5
from scrapy.loader import ItemLoader

class JobboleSpider(scrapy.Spider):
    """Crawl article list pages on python.jobbole.com and extract each
    article's details into a ``JobboleArticleItem``.

    Items are yielded from :meth:`parse_detail` and handed to the
    configured pipelines.
    """

    name = 'jobbole'

    allowed_domains = ['python.jobbole.com']
    start_urls = ['http://python.jobbole.com/category/guide/']

    def parse(self, response):
        """Parse a list page.

        1. Extract every article URL on the page and schedule it for
           download; the downloaded detail page is handled by
           :meth:`parse_detail`.
        2. Extract the next-page URL and schedule it; the result comes
           back to this method.
        """
        post_nodes = response.css('#archive .floated-thumb .post-thumb a')
        for post_node in post_nodes:
            image_url = post_node.css("img::attr(src)").extract_first("")
            post_url = post_node.css("::attr(href)").extract_first("")
            # Placeholder marker kept exactly as downstream code expects it.
            if not image_url:
                image_url = "NO front-image !"
            # urljoin resolves a possibly relative post_url against the
            # current page URL before handing it to the scheduler.
            yield Request(
                url=parse.urljoin(response.url, post_url),
                meta={"img_path": image_url},
                callback=self.parse_detail
            )

        # ".next.page-numbers" (no space between the classes) selects a
        # single element that carries BOTH classes; a space would instead
        # mean a descendant relationship.
        next_url = response.css(".next.page-numbers::attr(href)").extract_first("")
        if next_url:
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def parse_detail(self, response):
        """Parse one article page into a ``JobboleArticleItem``.

        Fields populated: title, author, content, publish_addr,
        publish_time, img_path, tag, is_valid, crawl_time,
        bookmark_num, point_up_num.
        """
        title = response.xpath('//*[@class="entry-header"]/h1/text()').extract_first()
        publish_time = response.xpath(
            '//*[@class="entry-meta-hide-on-mobile"]/text()'
        ).extract_first("").strip().replace("·", '').strip()

        img_path = response.meta["img_path"]
        content = response.css("div.entry").extract_first("")

        # Parse the page's "%Y/%m/%d" date; fall back to "now" when the
        # text is not a parseable date.  NOTE(review): success leaves a
        # datetime while the fallback stores a formatted string — the
        # pipeline apparently accepts both, so this asymmetry is preserved.
        try:
            publish_time = datetime.strptime(publish_time, "%Y/%m/%d")
        except ValueError:  # narrowed from a bare except: only strptime raises here
            publish_time = datetime.now().strftime("%Y/%m/%d:%H:%M:%S")

        # All author links (original author and, for translations, the
        # translator). Join with commas; an empty selection previously
        # crashed with IndexError on authors[0].
        authors = response.css("div.entry > div.copyright-area > a::text").extract()
        author = ','.join(authors) if authors else ''

        tags = response.css("div.entry-meta > p > a::text").extract()

        article_item = JobboleArticleItem()
        article_item["title"] = title
        article_item["author"] = author
        article_item["publish_time"] = publish_time
        article_item["tag"] = ",".join(tags)
        article_item["crawl_time"] = datetime.now().strftime("%Y/%m/%d:%H:%M:%S")
        article_item["content"] = content
        article_item["img_path"] = img_path
        article_item["publish_addr"] = response.url
        article_item["is_valid"] = 1
        article_item["bookmark_num"] = 0
        article_item['point_up_num'] = 0

        # Hand the item to pipelines.py.
        yield article_item

