import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from jianshu_spider.items import JianshuSpiderItem
from jianshu_spider.utils.common import extract_num,extract_qian_num


class JianshuSpider(CrawlSpider):
    """Crawl jianshu.com article detail pages and yield one JianshuSpiderItem each."""

    name = "jianshu"
    allowed_domains = ["jianshu.com", "jianshu.io"]
    start_urls = ["https://www.jianshu.com/p/fc890ed5083c"]

    # Follow only links that look like article detail pages (12-char slug after /p/);
    # follow=False keeps the crawl from expanding beyond pages matched directly.
    rules = (
        Rule(LinkExtractor(allow=r".*/p/[0-9a-z]{12}.*"), callback="parse_detail", follow=False),
    )

    def parse_detail(self, response):
        """Extract article fields from a detail page and yield a JianshuSpiderItem.

        All counts fall back to "0" when the corresponding node is missing.
        """
        # Article id is the last path segment of the URL with any query string stripped.
        article_id = response.url.split("?")[0].rsplit("/", 1)[-1]

        # Topic/collection names come back as a list of span texts; flatten to a
        # single comma-separated string for storage.
        topic_names = response.xpath("//div[@class='_2Nttfz']/a/span/text()").getall()

        yield JianshuSpiderItem(
            title=response.xpath("//section[1]/h1/text()").get(),
            avatar=response.xpath('//a[@class="_1qp91i _1OhGeD"]/img/@src').get(),
            author=response.xpath('//span[@class="FxYr8x"]/a/text()').get(),
            pub_time=response.xpath('//div[@class="s-dsoj"]//time/text()').get(),
            origin_url=response.url,
            article_id=article_id,
            content=response.xpath("//section[1]/article").get(),
            word_count=response.xpath("//section[1]/div[2]/div/div/div[2]/span[2]/text()").get(default="0"),
            read_count=response.xpath("//section[1]/div[2]/div/div/div[2]/span[3]/text()").get(default="0"),
            comment_count=response.xpath("//*[@id='note-page-comment']/section/h3[2]/div[1]/span[2]/text()").get(default="0"),
            like_count=response.xpath(
                "//*[@id='__next']/div[1]/div/div[1]/section[1]/div[5]/div[1]/div[1]/span/text()").get(default="0"),
            subjects=",".join(topic_names),
        )
