# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import xlrd
import xlwings as xw


class ScrapyCsdnPipeline:
    """Item pipeline that appends each scraped CSDN article as a row in an Excel workbook.

    Uses an invisible xlwings Excel instance: the workbook is opened in
    ``open_spider``, one row is appended per item in ``process_item``, and the
    workbook is saved and Excel shut down in ``close_spider``.
    """

    # Workbook the rows are appended to; change here (or override on a
    # subclass) instead of hunting through the method bodies.
    EXCEL_PATH = r'E:\oldData\资料库\python笔记\爬虫\代码\scrapy_csdn\scrapy_csdn\spiders\excel\csdn001.xlsx'

    # Item keys read from item["article"], in column order.
    FIELDS = ("origin", "title", "pictures", "type", "author_name", "author_id",
              "description", "create_date", "watch_num", "collect_num",
              "remark", "content")

    # Header row written to A1; must stay aligned with FIELDS above.
    HEADERS = ["文章来源", "文章标题", "文章封面", "文章类型", "作者昵称", "作者ID", "文章描述",
               "创作时间", "观看数量", "收藏数量", "备注", "文章内容"]

    def open_spider(self, spider):
        """Start a hidden Excel instance, open the workbook and write the header row."""
        self.total = 0  # count of rows written, reported in close_spider
        self.app = xw.App(visible=False, add_book=False)
        # The app is invisible, so any modal alert (overwrite prompt, recovery
        # dialog, ...) would block the crawl with no way to dismiss it —
        # suppress alerts instead of showing them.
        self.app.display_alerts = False
        self.app.screen_updating = False  # skip redraws; nothing is on screen anyway
        self.wb = self.app.books.open(self.EXCEL_PATH)
        self.wb.sheets['sheet1'].range('A1').value = self.HEADERS

    def process_item(self, item, spider):
        """Append one article as the next row after the sheet's used range."""
        sheet = self.wb.sheets['sheet1']
        # used_range.last_cell.row is the last occupied row (the header row at
        # minimum), so the next free row is one below it.
        next_row = sheet.used_range.last_cell.row + 1
        article = item["article"]
        # .get() so a missing key yields an empty cell instead of a KeyError.
        sheet.range('A' + str(next_row)).value = [article.get(k) for k in self.FIELDS]
        self.total += 1
        return item

    def close_spider(self, spider):
        """Save and close the workbook, then shut Excel down."""
        print("总数：", self.total)
        self.wb.save()
        self.wb.close()
        self.app.quit()
        # quit() may already have terminated the Excel process, in which case
        # kill() raises — swallow that so spider shutdown still completes.
        try:
            self.app.kill()
        except Exception:
            pass
