# -*- coding: utf-8 -*-
import scrapy
import re
from scrapy import Request
import urlparse
from ArticleSpider.items import ArticleItem,ArticleItemLoader
from ArticleSpider.utils.common import get_md5
from scrapy.loader import ItemLoader


class JobboleSpider(scrapy.Spider):
    """Crawl article listing pages on blog.jobbole.com and scrape each post.

    Starts from the all-posts listing, schedules every post link for
    detail parsing, and follows the "next page" link until exhausted.
    """

    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    def parse(self, response):
        """Extract post links (and their cover images) from a listing page.

        Yields one Request per post (handled by :meth:`parse_detail`) and,
        when present, a Request for the next listing page.
        """
        post_nodes = response.xpath("//*[@id='archive']/div/div/a")
        for post_node in post_nodes:
            post_url = post_node.xpath('@href').extract_first('')
            image_url = post_node.xpath('img/@src').extract_first('')
            # Pass the cover-image URL through meta so parse_detail can
            # attach it to the item it builds.
            yield Request(url=urlparse.urljoin(response.url, post_url),
                          meta={'front_image_url': image_url},
                          callback=self.parse_detail)

        # Follow pagination. Previously the next-page URL was extracted but
        # never used (the Request was commented out), so only the first
        # listing page was ever crawled.
        next_url = response.xpath("//a[contains(@class,'next')]/@href").extract_first()
        if next_url:
            yield Request(url=urlparse.urljoin(response.url, next_url),
                          callback=self.parse)

    def parse_detail(self, response):
        """Populate an ArticleItem from a single post page via the item loader."""
        front_image_url = response.meta.get('front_image_url')
        item_loader = ArticleItemLoader(item=ArticleItem(), response=response)

        # The three key ItemLoader methods: add_xpath, add_css, add_value.
        item_loader.add_xpath("title", "//*[@class='entry-header']/h1/text()")
        item_loader.add_value("url", response.url)
        item_loader.add_value("url_object_id", get_md5(response.url))
        # Wrapped in a list: the images pipeline expects an iterable of URLs.
        item_loader.add_value("front_image_url", [front_image_url])
        item_loader.add_xpath("create_date", "//*[@class='entry-meta-hide-on-mobile']/text()")
        item_loader.add_xpath("praise_nums", "//span[contains(@class,'vote-post-up')]/h10/text()")
        item_loader.add_xpath("fav_nums", "//span[contains(@class,'bookmark-btn')]/text()")
        item_loader.add_xpath("comment_nums", "//a[@href='#article-comment']/span/text()")
        item_loader.add_xpath("content", "//div[@class='entry']")
        item_loader.add_xpath("tag", "//*[@class='entry-meta-hide-on-mobile']/a/text()")

        yield item_loader.load_item()