import scrapy
import json
import jsonpath
from scrapy_csdn.items import ScrapyCsdnItem


class Tuijian2Spider(scrapy.Spider):
    """Scrapes CSDN's home-page recommendation feed into articles.

    All three stages share the single ``parse`` callback and are
    dispatched on the response URL:

    1. The seed request (``start_urls``) matches neither the API nor a
       detail URL, so it fans out feed-API requests per category slug.
    2. Feed-API responses are parsed as JSON; every recommended blog
       yields a follow-up request to the article's detail page.
    3. Detail pages are scraped for date / counters / tags / body and
       emitted as ``ScrapyCsdnItem``.
    """

    name = 'tuijian2'
    allowed_domains = ['blog.csdn.net', 'cms-api.csdn.net']
    start_urls = ['https://blog.csdn.net/']

    # Recommendation-feed API: ``baseUrl`` is the uncategorised feed;
    # ``openUrl`` takes a category slug in its single ``{}`` placeholder.
    baseUrl = "https://cms-api.csdn.net/v1/web_home/select_content?componentIds=www-blog-recommend"
    openUrl = "https://cms-api.csdn.net/v1/web_home/select_content?componentIds=www-blog-recommend&cate1={}"
    # Category slugs accepted by the API's ``cate1`` query parameter.
    houduan = "back-end"  # back-end
    web = "web"  # front-end
    mobile = "mobile"  # mobile
    lang = "lang"  # programming languages
    java = "java"  # java
    python = "python"  # python
    ai = "ai"  # ai
    bigData = "big-data"  # big data
    alg1 = "algo"  # data structures & algorithms
    alg2 = "avi"  # audio & video
    alg3 = "cloud-native"  # cloud native
    alg4 = "cloud"  # cloud platforms
    alg5 = "advanced-technology"  # cutting-edge technology
    alg6 = "open-source"  # open source
    alg7 = "ops"  # operations
    alg8 = "server"  # servers
    alg9 = "os"  # operating systems
    alg10 = "hardware"  # hardware development
    alg11 = "embedded"  # embedded
    alg12 = "microsoft"  # Microsoft technologies
    alg13 = "software-engineering"  # testing
    alg14 = "sec"  # cyber security
    alg15 = "telecommunication"  # networking & communication
    alg16 = "design"  # UX design
    alg17 = "job"  # learning & growth
    alg18 = "search"  # search
    alg19 = "devtools"  # developer tools
    alg20 = "game"  # games
    alg21 = "harmonyos"  # harmonyos
    alg22 = "blockchain"  # blockchain
    alg23 = "math"  # mathematics
    urlArr = [houduan, web, mobile, lang, java, python, ai, bigData, alg1, alg2, alg3, alg4, alg5, alg6, alg7, alg8,
              alg9, alg10, alg11, alg12, alg13, alg14, alg15, alg16, alg17, alg18, alg19, alg20, alg21, alg22, alg23]
    # URL fragment that identifies an article detail page.
    detail = "article/details"
    page = 0
    pageSize = 100
    # Fallback thumbnail used when a feed entry carries no picture.
    defaultPic = "https://blog-1305261271.cos.ap-shanghai.myqcloud.com/wangEdit/1/167317948159748"

    def parse(self, response):
        """Dispatch a response to the matching stage by URL shape."""
        if self.baseUrl in response.url:
            yield from self._parse_feed(response)
        elif self.detail in response.url:
            yield from self._parse_detail(response)
        else:
            # Seed request: fan out the per-category API requests.
            yield from self._request_categories()

    def _parse_feed(self, response):
        """Parse one feed-API JSON response; yield one detail-page
        request per recommended article."""
        obj = json.loads(response.text)
        code = jsonpath.jsonpath(obj, "$.code")
        message = jsonpath.jsonpath(obj, "$.msg")
        data = jsonpath.jsonpath(obj, "$.data.www-blog-recommend.info")
        # jsonpath.jsonpath returns False (not None) when the path does not
        # match, so guard BEFORE indexing — the original printed first and
        # crashed with TypeError on a missing path.
        if not data or data[0] is None:
            return
        print(code[0] if code else None, message[0] if message else None,
              str(len(data[0])) + "个数据", response.meta.get("type"), response.meta.get("page"))
        for entry in data[0]:
            blog = entry.get("extend")
            if not blog:
                continue  # entry without the payload we scrape from
            pic_list = blog.get("picList")
            pic = pic_list[0] if pic_list else self.defaultPic
            # Partially-filled article record; the detail-page stage fills
            # in date / counters / tags / content.
            article = {
                "origin": blog.get("url"),
                "pictures": pic,
                "title": blog.get("title"),
                "author_name": blog.get("nickname"),
                "description": blog.get("desc"),
                "author_id": 1,
                "create_date": "",
                "watch_num": 0,
                "collect_num": 0,
                "type": 0,
                "content": "",
                "remark": json.dumps(blog),
            }
            yield scrapy.Request(url=article["origin"], callback=self.parse, dont_filter=True,
                                 meta={"article": article})

    def _parse_detail(self, response):
        """Scrape an article detail page and emit the completed item."""
        header = response.xpath("//div[@class='blog-content-box']//div[@class='article-header']//div["
                                "@class='bar-content']")[0]
        # Timestamp text starts with "已于" (edited) or "于" (published);
        # slice out the 19-character timestamp after the prefix.  A missing
        # node crashed the original with AttributeError on None.
        date = header.xpath("//span[@class='time']/text()").get()
        if date is None:
            newdate = ""  # keep the item, just leave the date empty
        elif date.startswith("已于"):
            newdate = date[3:22]
        else:
            newdate = date[2:21]
        # View counter.
        read_count = header.xpath("//span[@class='read-count']/text()").get()
        # Bookmark counter; empty text means zero.
        collect_num = header.xpath("normalize-space(//span[@class='get-collection']/text())").get()
        if collect_num == "":
            collect_num = 0
        # Article tags, joined into a comma-separated string (the original
        # built this with += and stripped the trailing comma by hand).
        typevm = response.xpath("//div[@class='blog-content-box']//div[@class='article-header']//div["
                                "@class='article-info-box']/div[@class='blog-tags-box']")[0]
        tag_texts = typevm.xpath("//a[@class='tag-link' and @data-report-click]/text()").getall()
        tags = ",".join(tag_texts)
        # Article body HTML.
        content = response.xpath("//div[@class='blog-content-box']/article[@class='baidu_pl']")[0]
        # Complete the record started by the feed stage.
        article = response.meta["article"]
        article["type"] = tags
        article["collect_num"] = collect_num
        article["create_date"] = newdate
        article["watch_num"] = read_count
        article["content"] = content.extract()
        yield ScrapyCsdnItem(article=article)

    def _request_categories(self):
        """Yield the per-category feed-API requests.

        NOTE(review): the page counter only travels in ``meta`` — the URL
        template has no page placeholder, so each category URL is fetched
        20 times verbatim (``dont_filter=True`` allows the duplicates),
        presumably because the recommendation feed returns different items
        on every call.  TODO: confirm against the API and add a real page
        parameter if one exists.
        """
        for category in self.urlArr:
            url = self.openUrl.format(category)
            for page in range(1, 21):
                yield scrapy.Request(url=url, callback=self.parse,
                                     meta={"type": category, "page": page},
                                     dont_filter=True)
