# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy.http import Request
from urllib import parse
from datetime import datetime
from bolezaixian.items import BolezaixianItem
from bolezaixian.utlis.common import get_md5


class JobboleSpider(scrapy.Spider):
    """Crawl blog.jobbole.com article listings and yield one
    ``BolezaixianItem`` per article with title, publication date, tags,
    bookmark/comment counts, cover-image URL and body HTML.
    """
    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    def parse(self, response):
        """Extract every article link on a listing page, schedule a detail
        request for each, then follow the "next page" link if present.

        :param response: listing-page response (start_urls or a next page)
        :yields: ``Request`` objects for article pages and the next listing page
        """
        # Each article card is <div class="post floated-thumb">; its first
        # child <div> holds the anchor that wraps the cover <img>.
        a_nodes = response.xpath('//div[@class="post floated-thumb"]/div[1]/a')

        for node in a_nodes:
            # Cover-image src; may be None when the card has no thumbnail.
            front_image_url = node.css('img::attr(src)').extract_first()
            # Article detail link; may be relative, hence urljoin below.
            post_url = node.css('::attr(href)').extract_first()
            # meta forwards the cover URL so pare_detail's response carries
            # it and the detail page does not need to be re-scraped for it.
            yield Request(url=parse.urljoin(response.url, post_url),
                          meta={'front_image_url': front_image_url},
                          callback=self.pare_detail)

        # The "next page" anchor carries both classes: next and page-numbers.
        # extract_first('') defaults to '' so the truthiness check is safe.
        next_url = response.css('.next.page-numbers::attr(href)').extract_first('')
        if next_url:
            # Listing pages are handled by this same callback.
            yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)

    def pare_detail(self, response):
        """Parse one article page and yield a populated ``BolezaixianItem``.

        NOTE(review): the name looks like a typo for ``parse_detail``; kept
        as-is because ``parse()`` registers it as the callback.

        :param response: article detail-page response; ``response.meta``
            carries ``front_image_url`` set by :meth:`parse`
        :yields: a single ``BolezaixianItem``
        """
        # Prefer class-based selectors over id: post ids change per page.
        # extract_first(default) avoids IndexError on unexpected markup,
        # which the original extract()[0] calls would raise.
        title = response.xpath("//div[@class='entry-header']/h1/text()").extract_first('')

        # Publication date, e.g. " 2018/01/02 · " -> "2018/01/02".
        crat_time = response.xpath('//p[@class="entry-meta-hide-on-mobile"]/text()').extract_first('')
        crat_time = crat_time.strip().strip('·').strip()

        # Tag links inside the meta paragraph, joined as "a,b,c" ('' if none).
        tags = response.xpath("//p[@class='entry-meta-hide-on-mobile']/a/text()").extract()
        tags = ','.join(tags) if tags else ''

        # Vote ("like") count.  NOTE(review): extracted but never stored on
        # the item — possibly a missing bolezaixian['like_nums']; confirm
        # against the item definition before wiring it up.
        like_nums = response.xpath('//span[@class=" btn-bluet-bigger href-style vote-post-up   register-user-only "]/h10/text()').extract_first('0')

        # Bookmark count, e.g. " 3 收藏" -> 3; 0 when no digits present.
        collection_nums = response.xpath('//span[@class=" btn-bluet-bigger href-style bookmark-btn  register-user-only "]/text()').extract_first('')
        collection_nums = self._first_int(collection_nums)

        # Comment count, e.g. " 12 评论" -> 12.  The original pattern
        # r'.*(\d).*?' let the greedy .* swallow leading digits, capturing
        # only the LAST digit ("12" -> 2); _first_int grabs the whole number.
        comment_nums = response.xpath('//span[@class="btn-bluet-bigger href-style hide-on-480"]/text()').extract_first('')
        comment_nums = self._first_int(comment_nums)

        # Cover-image URL forwarded from parse() via Request.meta.
        front_img_url = response.meta.get('front_image_url', '')
        # Article body: list of matched <div class="entry"> HTML fragments.
        content = response.xpath('//div[@class="entry"]').extract()

        bolezaixian = BolezaixianItem()
        bolezaixian['title'] = title
        # Fall back to "now" when the date is not in YYYY/MM/DD form;
        # ValueError covers a bad format, TypeError a non-string value.
        try:
            crat_time = datetime.strptime(crat_time, '%Y/%m/%d')
        except (ValueError, TypeError):
            crat_time = datetime.now()
        bolezaixian['crat_time'] = crat_time
        bolezaixian['url'] = response.url
        bolezaixian['url_object_id'] = get_md5(response.url)
        # Image pipelines expect a LIST of URLs, hence the wrapping.
        bolezaixian['front_img_url'] = [front_img_url]
        bolezaixian['comment_nums'] = comment_nums
        bolezaixian['collection_nums'] = collection_nums
        bolezaixian['tags'] = tags
        bolezaixian['content'] = content

        # Hand the populated item to the item pipelines.
        yield bolezaixian

    @staticmethod
    def _first_int(text):
        """Return the first run of digits in *text* as an int, or 0 if none."""
        ret = re.search(r'\d+', text or '')
        return int(ret.group()) if ret else 0