# 【一】方案一：保存到本地文件
# 在抓取数据成功之后 写一个函数保存到本地
'''
def save(self, data):
    file_path = os.path.join(os.path.dirname(__file__), "data.json")
    with open(file_path, "w", encoding="utf8") as fp:
        json.dump(obj=data, fp=fp, ensure_ascii=False)
'''
# 【二】方案二：保存到本地(配合命令行)
# 前提是 数据要求是 [{"key":"value"}]
'''
# 创建一个列表
article_list = [] 

# 添加字典格式的数据到列表中
article_list.append({
            "title": title,
            "detail_url": detail_url,
            "desc": desc,
            "author_name": author_name,
            "author_blog": author_blog,
            "comment_num": comment_num,
            "up_num": up_num,
            "visit_num": visit_num,
        })
        
# 将列表返回出去
return article_list
'''
# 【1】保存为本地的json文件
# scrapy crawl your_spider_name -o output.json
# scrapy crawl cnblog -o blog_data.json
# 【2】保存为本地的csv文件
# scrapy crawl your_spider_name -o blog_data.csv -t csv
# scrapy crawl cnblog -o blog_data.csv -t csv
# 注：新版 Scrapy 已弃用 -t 参数，输出格式会根据 -o 文件扩展名自动推断，
#     因此 -t csv 可以省略：scrapy crawl cnblog -o blog_data.csv

"""
    # 解析响应的函数
    # 【二】解析方式二：通过 Xpath 选择器进行解析数据
    def parse(self, response):
        # 【1】获取所有的文章列表
        article_list_all = response.xpath('//*[@id="post_list"]/article')
        # 【2】遍历每一篇文章获取到文章详情内容
        article_dict = {}
        article_list = []
        for article_obj in article_list_all:
            try:
                # （3）文章标题
                title = article_obj.xpath('./section/div/a/text()').extract_first()
                # （4）文章详情链接
                detail_url = article_obj.xpath('./section/div/a/@href').extract_first()
                # （5）文章简介内容
                try:
                    desc = article_obj.xpath('./section/div/p/text()').extract()[1].strip()
                except Exception as e:
                    desc = ""
                # （6）作者名字
                author_name = article_obj.xpath('./section/footer/a[1]/span/text()').extract_first()
                # （7）作者主页地址
                author_blog = article_obj.xpath('./section/footer/a[1]/@href').extract_first()
                # （8）评论数
                comment_num = article_obj.xpath('./section/footer/a[2]/span/text()').extract_first()
                # （9）点赞数
                up_num = article_obj.xpath('./section/footer/a[3]/span/text()').extract_first()
                # （10）观看数
                visit_num = article_obj.xpath('./section/footer/a[4]/span/text()').extract_first()
                article_dict[title] = {
                    "title": title,
                    "detail_url": detail_url,
                    "desc": desc,
                    "author_name": author_name,
                    "author_blog": author_blog,
                    "comment_num": comment_num,
                    "up_num": up_num,
                    "visit_num": visit_num,
                }
                article_list.append({
                    "title": title,
                    "detail_url": detail_url,
                    "desc": desc,
                    "author_name": author_name,
                    "author_blog": author_blog,
                    "comment_num": comment_num,
                    "up_num": up_num,
                    "visit_num": visit_num,
                })
            except Exception as e:
                print(f"{title} :>>>> {e}")
        self.save(data=article_dict)
        print(f"article_list :>>>> {article_list}")
        return article_list

    def save(self, data):
        file_path = os.path.join(os.path.dirname(__file__), "data.json")
        with open(file_path, "w", encoding="utf8") as fp:
            json.dump(obj=data, fp=fp, ensure_ascii=False)
"""
