# -*- coding: utf-8 -*-

import scrapy
import re
from spider.items import PianshenItem


class PianshenSpider(scrapy.Spider):
    """Crawl pianshen.com: seed from the category list pages, parse each
    article into a ``PianshenItem``, and keep following related-article
    links until ``max_num`` distinct articles have been collected."""

    name = 'pianshen'
    # Article ids already yielded — class-level, so it doubles as the
    # de-duplication set and the stop counter against ``max_num``.
    article_ids = []
    # Upper bound on articles to collect; overridable via ``-a max_num=N``.
    max_num = 10000
    allowed_domains = ['pianshen.com']
    start_urls = ['https://www.pianshen.com']
    base_url = 'https://www.pianshen.com'

    def __init__(self, max_num=None, *args, **kwargs):
        super(PianshenSpider, self).__init__(*args, **kwargs)
        if max_num:
            # Spider arguments passed on the command line arrive as strings;
            # coerce to int so the count comparison in parse_article_url
            # does not raise TypeError (int vs str) in Python 3.
            self.max_num = int(max_num)

    def start_requests(self):
        """Seed the crawl with the ten top-level category list pages."""
        for i in range(1, 11):
            url = 'https://www.pianshen.com/list/%s/' % (i,)
            yield scrapy.Request(url=url, callback=self.parse_list_url)

    def parse_list_url(self, response):
        """Extract article links from a list page and schedule article requests.

        :param response: list-page response
        :return: generator of scrapy.Request for each article found
        """
        article_list = response.xpath("//div[contains(@class, 'loop-wrap')]")
        for article_item in article_list:
            uri = article_item.xpath(".//div[contains(@class, 'loop-thumb')]//a/@href").extract_first()
            if not uri:
                # Some loop-wrap blocks carry no link; skip instead of
                # crashing on ``self.base_url + None``.
                continue
            url = self.base_url + uri
            yield scrapy.Request(url=url, callback=self.parse_article_url, cb_kwargs={'url': url})

    def parse_article_url(self, response, url):
        """Parse an article page into a PianshenItem and follow related links.

        :param response: article-page response
        :param url: absolute URL this request was issued with (via cb_kwargs)
        :return: generator yielding one PianshenItem, then related-article Requests
        """
        item = PianshenItem()
        item['url'] = url
        # Try the known content containers in order of likelihood.  These
        # must be sequential ``if`` blocks: with ``elif not content_nodes``
        # the later fallbacks were unreachable (the elif only ran when
        # content_nodes was non-empty, making its condition always False).
        content_nodes = response.xpath("//div[contains(@class, 'markdown_views')]/node()").extract()
        if not content_nodes:
            content_nodes = response.xpath("//div[contains(@class, 'htmledit_views')]/node()").extract()
        if not content_nodes:
            content_nodes = response.xpath("//div[contains(@class, 'post-body')]/node()").extract()
        if not content_nodes:
            content_nodes = response.xpath("//div[contains(@class, 'toc')]/node()").extract()
        content_nodes = [i for i in content_nodes if i not in ['\n']]
        item['content'] = ''.join(content_nodes)
        id_matches = re.findall(r"article/(.+)/", item['url'])
        if not id_matches:
            # URL does not look like an article page; nothing usable here.
            return
        item['article_id'] = id_matches[0]
        item['title'] = response.xpath("//h2/span/text()").extract_first()
        if item['article_id'] not in self.article_ids:
            self.article_ids.append(item['article_id'])
        yield item
        if len(self.article_ids) > self.max_num:
            # Quota reached — stop scheduling further related articles.
            return
        related_articles = response.xpath("//div/ul/li/a[re:match(@href, '^/article/[0-9]+/$')]/@href").extract()
        for article_uri in related_articles:
            related_url = self.base_url + article_uri
            yield scrapy.Request(url=related_url, callback=self.parse_article_url, cb_kwargs={'url': related_url})
