# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import time
from itemadapter import ItemAdapter
import openpyxl
import sqlite3


# Save scraped data to an Excel workbook
class ExcelPipeline:
    """Scrapy pipeline that collects scraped movie items into an Excel sheet.

    A workbook with a header row is created up front; each processed item is
    appended as one worksheet row, and the file is saved when the spider closes.
    """

    # (item key, default) pairs defining the column order of every data row.
    _COLUMNS = (
        ("title", ""),
        ("rank", ""),
        ("subject", ""),
        ("duration", 0),
        ("intro", ""),
    )

    def __init__(self) -> None:
        # Remember when the pipeline started so close_spider can report timing.
        self.start_time = time.time()
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        sheet.title = "Top250"
        sheet.append(("标题", "评分", "主题", "时长", "简介"))
        self.wb = workbook
        self.ws = sheet

    def close_spider(self, spider):
        """Save the workbook to disk and print a start/end/elapsed summary."""
        self.wb.save("豆瓣电影排名.xlsx")
        self.end_time = time.time()   # record finish time
        print(f'ExcelPipeline 开始-结束时间:{time.strftime("%H:%M:%S",time.localtime(self.start_time))}-{time.strftime("%H:%M:%S",time.localtime(self.end_time))}, 处理时长:{round(self.end_time-self.start_time,2)}秒')

    def process_item(self, item, spider):
        """Append one scraped item as a worksheet row and pass the item on."""
        row = tuple(item.get(key, default) for key, default in self._COLUMNS)
        self.ws.append(row)
        return item


# Save scraped data to a SQLite3 database
class SQLitePipeline:
    """Scrapy pipeline that persists scraped movie items to a SQLite database.

    Rows are buffered in memory and flushed to the database in batches of
    ``BATCH_SIZE``; any remainder is flushed when the spider closes.
    """

    # Number of buffered rows that triggers a batched write.
    BATCH_SIZE = 100

    def __init__(self) -> None:
        self.start_time = time.time()   # record start time for the closing summary
        # NOTE(review): relative path — resolved against the process cwd; confirm
        # the spider is always launched from the expected directory.
        self.con = sqlite3.connect("../data/test.db")
        self.cursor = self.con.cursor()
        self.data = []                  # in-memory row buffer, flushed in batches
        self.create_table()

    def close_spider(self, spider):
        """Flush any remaining buffered rows, close the connection, print timing."""
        # try/finally ensures the connection is released even if the final
        # flush raises (e.g. IntegrityError on a duplicate title).
        try:
            if self.data:
                self._write_todb()
                self.data.clear()
        finally:
            self.con.close()
        self.end_time = time.time()   # record finish time
        print(f'SQLitePipeline 开始-结束时间:{time.strftime("%H:%M:%S",time.localtime(self.start_time))}-{time.strftime("%H:%M:%S",time.localtime(self.end_time))}, 处理时长:{round(self.end_time-self.start_time,2)}秒')

    def process_item(self, item, spider):
        """Buffer one item as a row tuple; flush to the DB every BATCH_SIZE rows."""
        title = item.get("title", "")
        rank = item.get("rank", "")
        subject = item.get("subject", "")
        duration = item.get("duration", 0)
        intro = item.get("intro", "")
        self.data.append((title, rank, subject, duration, intro))
        # >= (not ==) so an over-full buffer can never silently stop flushing.
        if len(self.data) >= self.BATCH_SIZE:
            self._write_todb()
            self.data.clear()
        return item

    def _write_todb(self):
        """Batch-insert the buffered rows and commit.

        NOTE(review): ``title`` is the PRIMARY KEY, so a duplicate title makes
        executemany raise IntegrityError and drops the whole batch — consider
        ``INSERT OR IGNORE`` if duplicate titles are expected in the feed.
        """
        self.cursor.executemany(
            'insert into tb_doban_movie(title, rank, subject, duration, intro) values(?, ?, ?, ?, ?)',
            self.data
        )
        self.con.commit()

    def create_table(self):
        """(Re)create the target table; query with: select * from tb_doban_movie;

        The table is dropped first, so each crawl starts from an empty table.
        """
        query = """
            drop table if exists tb_doban_movie;
        """
        self.cursor.execute(query)
        self.con.commit()
        query = """
            CREATE TABLE tb_doban_movie (
                title TEXT NOT NULL,
                rank REAL NOT NULL,
                subject TEXT NOT NULL,
                duration REAL NOT NULL,
                intro TEXT,
                PRIMARY KEY (title)
            );
        """
        self.cursor.execute(query)
        self.con.commit()