import scrapy
import json
import jsonpath
from scrapy_csdn.items import ScrapyCsdnItem
from scrapy.exceptions import CloseSpider


class TuijianSpider(scrapy.Spider):
    """Crawl CSDN's recommendation-feed API, follow each entry to its
    article page, and yield a fully populated ``ScrapyCsdnItem``.
    """

    name = 'tuijian'
    allowed_domains = ['blog.csdn.net']
    start_urls = ['https://blog.csdn.net/phoenix/web/v1/home/information?page=1&pageSize=100']

    # Paged JSON API of the recommendation feed.
    baseUrl = "https://blog.csdn.net/phoenix/web/v1/home/information?page={page}&pageSize={pageSize}"
    page = 0        # last page number scheduled so far
    pageSize = 100  # entries requested per API page

    def parse(self, response):
        """Schedule API pages 1..100 to be parsed by ``parseAa``.

        ``dont_filter=True`` is required: the page-1 URL is byte-identical
        to the start URL that was already fetched, so without it the
        dupefilter would drop that request and page 1's data would never
        reach ``parseAa``.
        """
        for _ in range(100):
            self.page += 1
            url = self.baseUrl.format(page=self.page, pageSize=self.pageSize)
            yield scrapy.Request(url=url, callback=self.parseAa,
                                 meta={"page": self.page}, dont_filter=True)

    def parseAa(self, response):
        """Parse one JSON page of the feed and request each article's detail page."""
        obj = json.loads(response.text)
        code = jsonpath.jsonpath(obj, "$.code")
        message = jsonpath.jsonpath(obj, "$.message")
        trace_id = jsonpath.jsonpath(obj, "$.traceId")
        data = jsonpath.jsonpath(obj, "$.data")

        # jsonpath returns False (not None) when nothing matches; guard
        # BEFORE any use so an empty/missing payload cannot crash len().
        if not data or not data[0]:
            return
        print(code[0], message[0], trace_id[0], str(len(data[0])) + "个数据", response.meta.get("page"))

        for blog in data[0]:
            # Fallback cover image used when the entry carries no picture.
            pic = "https://blog-1305261271.cos.ap-shanghai.myqcloud.com/wangEdit/1/167317948159748"
            pic_list = blog.get("picList")
            if pic_list:
                pic = pic_list[0]
            article = {
                "origin": blog.get("url"),
                "pictures": pic,
                "title": blog.get("title"),
                "author_name": blog.get("nickname"),
                "description": blog.get("description"),
                "author_id": 1,
                "create_date": "",
                "watch_num": 0,
                "collect_num": 0,
                "type": 0,
                "content": "",
                "remark": json.dumps(blog)
            }
            yield scrapy.Request(url=article["origin"], callback=self.getDetail,
                                 meta={"article": article})

    # Scrape the article detail page and complete the item.
    def getDetail(self, response):
        """Extract date / counters / tags / body from an article page and
        yield the completed item carried in ``response.meta['article']``.
        """
        header = response.xpath("//div[@class='blog-content-box']//div[@class='article-header']//div["
                                "@class='bar-content']")[0]
        # NOTE: ".//" keeps queries relative to `header` — a bare "//" on a
        # sub-selector searches the whole document from the root in Scrapy.
        date = header.xpath(".//span[@class='time']/text()").get()
        newdate = ""
        if date:
            # Strip the "已于 " / "于 " prefix, keep the 19-char timestamp.
            newdate = date[3:22] if date.startswith("已于") else date[2:21]
        # View count (absent on some layouts — default to 0 instead of None).
        read_count = header.xpath(".//span[@class='read-count']/text()").get() or 0
        # Bookmark count; empty string means zero.
        collect_num = header.xpath("normalize-space(.//span[@class='get-collection']/text())").get()
        if collect_num == "":
            collect_num = 0
        # Article tags, joined as "a,b,c" (empty string when no tags).
        typevm = response.xpath("//div[@class='blog-content-box']//div[@class='article-header']//div["
                                "@class='article-info-box']/div[@class='blog-tags-box']")[0]
        tags = ",".join(typevm.xpath(".//a[@class='tag-link' and @data-report-click]/text()").getall())
        # Article body HTML.
        content = response.xpath("//div[@class='blog-content-box']/article[@class='baidu_pl']")[0]

        # Fill in the fields collected from the detail page.
        article = response.meta["article"]
        article["type"] = tags
        article["collect_num"] = collect_num
        article["create_date"] = newdate
        article["watch_num"] = read_count
        article["content"] = content.extract()

        yield ScrapyCsdnItem(article=article)
