import scrapy
from urllib import request
from script_test.settings import COOKIE
import jsonpath
import json
import re
from w3lib.html import remove_tags
from script_test.items import CnblogsItem

class Csdn(scrapy.Spider):
    """Crawl csdn.net: walk each home-page category, page through the
    offset-based articles API, and scrape every article detail page
    into a ``CnblogsItem``.
    """

    name = "csdn"
    start_urls = ["https://www.csdn.net/"]
    # API endpoint; the two %s placeholders are (category, shown_offset).
    base_url = 'https://www.csdn.net/api/articles?type=more&category=%s&shown_offset=%s'

    def parse(self, response):
        """Extract each category link from the home-page nav bar and
        follow it.  The first three nav entries are skipped (they are
        not category pages).
        """
        url_list = response.css('div.nav_com a::attr(href)').extract()[3:]
        for u in url_list:
            # response.urljoin is the Scrapy idiom; the original
            # request.urljoin only worked because urllib.request happens
            # to re-export urllib.parse.urljoin (implementation detail).
            yield scrapy.Request(response.urljoin(u), callback=self.parse_offset)

    def parse_offset(self, response):
        """Read the initial shown-offset from a category page and issue
        the first API request for that category.
        """
        category = response.url.split("/")[-1]
        # .get() returns None instead of raising IndexError when the
        # attribute is absent (page layout change, empty feed, ...).
        offset = response.css('ul#feedlist_id ::attr(shown-offset)').get()
        if offset is None:
            return
        # Hit the JSON API that lists the article detail URLs.
        yield scrapy.Request(self.base_url % (category, offset),
                             callback=self.parse_api, cookies=COOKIE)

    def parse_api(self, response):
        """Parse one API JSON payload: keep paginating via the returned
        shown_offset and schedule every article detail URL found.
        """
        data = json.loads(response.text)  # parse once, reuse below
        category = re.search(r'category=(.+?)&', response.url).group(1)
        offset = jsonpath.jsonpath(data, '$.shown_offset')[0]
        # An offset of "0" marks the end of the feed; otherwise request
        # the next page of this category.
        if offset != "0":
            yield scrapy.Request(self.base_url % (category, offset),
                                 callback=self.parse_api, cookies=COOKIE)

        # Follow every article detail link anywhere in the payload.
        info_urls = jsonpath.jsonpath(data, '$..url') or []
        for u in info_urls:
            yield scrapy.Request(u, callback=self.parse_info)

    def parse_info(self, response):
        """Scrape a single article page into a CnblogsItem."""
        item = CnblogsItem()
        item["info_url"] = response.url
        # .get() yields None (instead of crashing) when a selector
        # matches nothing on a redesigned / partial page.
        item["title"] = response.css('h1.title-article::text').get()
        item["author"] = response.css('h2.title-blog a::text').get()
        item["time"] = response.css('span.time::text').get()
        # Comment count is rendered by JS and is not scraped here.
        item["comment"] = 0
        item["view"] = response.css('span.read-count::text').get()
        # Fix: use .get() so diggnum is a scalar like every other field,
        # not a one-element list as the original .extract() produced.
        item["diggnum"] = response.css('button.btn-like p::text').get()
        article = response.css('div#article_content').get()
        # Strip markup but keep structural tags for readability.
        item["article"] = (remove_tags(article, keep=("div", "p", "span"))
                           if article is not None else None)
        yield item







