# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

# Hook (callback) methods: we never call these ourselves — Scrapy invokes them.
import openpyxl
from pymysql import Connection

class DbPipeline:
    """Scrapy item pipeline that persists scraped movie items to a MySQL table.

    One row is inserted per item into ``top_movie``; everything is committed
    in a single transaction when the spider closes.
    """

    def __init__(self):
        # Open the connection once for the lifetime of the pipeline.
        # NOTE(review): credentials should live in Scrapy settings or the
        # environment, not in source — TODO move them out.
        self.conn = Connection(
            host='localhost',  # MySQL host / IP address
            port=3306,         # default MySQL port
            user='root',
            password='jijie0418',
            database='spider'
        )
        self.cursor = self.conn.cursor()

    def close_spider(self, spider):
        """Commit all buffered inserts and release DB resources.

        The try/finally guarantees the cursor and connection are closed even
        if the commit itself raises, so we never leak the connection.
        """
        try:
            # Inserts are pending in the open transaction; commit them all.
            self.conn.commit()
        finally:
            self.cursor.close()
            self.conn.close()

    def process_item(self, item, spider):
        """Insert one item into ``top_movie`` and pass it down the pipeline.

        Missing fields fall back to safe defaults ('' / 0) so a sparse item
        never raises. The query is parameterized, so values are escaped by
        the driver.
        """
        self.cursor.execute(
            'insert into top_movie (title,rating,subject,duration,num_evaluated) values(%s,%s,%s,%s,%s)',
            (
                item.get('title', ''),
                item.get('rank', 0),
                item.get('subject', ''),
                item.get('duration', 0),
                item.get('num_evaluated', 0),
            )
        )
        return item
#批量处理
# class DbPipeline2:
#     def __init__(self):
#         self.conn = Connection(
#             host='localhost',
#             port=3306,
#             user='root',
#             password='jijie0418',
#             database='spider'
#         )
#         self.cursor = self.conn.cursor()
#         self.batch_size = 100  # 批量插入的数量阈值
#         self.batch_data = []  # 存储待插入的数据
#
#     def close_spider(self, spider):
#         if self.batch_data:
#             self.insert_data()  # 处理剩余的数据
#         self.conn.commit()
#         self.conn.close()
#
#     def process_item(self, item, spider):
#         title = item.get('title', '')
#         rank = item.get('rank', 0)
#         subject = item.get('subject', '')
#         self.batch_data.append((title, rank, subject))
#
#         if len(self.batch_data) >= self.batch_size:
#             self.insert_data()  # 达到批量插入的数量阈值时执行插入操作
#
#         return item
#
#     def insert_data(self):
#         self.cursor.executemany(
#             'insert into top_movie (title, rating, subject) values (%s, %s, %s)',
#             self.batch_data
#         )
#         self.conn.commit()
#         self.batch_data = []  # 清空待插入的数据

class ExcelPipeline:
    """Scrapy item pipeline that collects movie items into an Excel workbook.

    Rows accumulate in an in-memory worksheet; the file is written once when
    the spider closes.
    """

    def __init__(self):
        self.wb = openpyxl.Workbook()
        # Use the workbook's default active sheet.
        self.ws = self.wb.active
        self.ws.title = 'Top250'
        # Header row. (Fixed: '评价人数' previously had a stray leading space,
        # which produced an inconsistent column header in the saved file.)
        self.ws.append(['标题', '评分', '主题', '时长', '评价人数'])

    def open_spider(self, spider):
        # Nothing to set up at spider start; kept for the pipeline interface.
        pass

    def close_spider(self, spider):
        """Write the accumulated rows to disk when the spider finishes."""
        self.wb.save('电影数据.xlsx')

    def process_item(self, item, spider):
        """Append one item as a worksheet row and pass it down the pipeline.

        Missing fields fall back to '' / 0 so a sparse item never raises.
        """
        row = (
            item.get('title', ''),
            item.get('rank', 0),
            item.get('subject', ''),
            item.get('duration', 0),
            item.get('num_evaluated', 0),
        )
        self.ws.append(row)
        return item
