# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import openpyxl
from scrapy import item
from datetime import datetime


class DoubanPipeline(object):
    """Scrapy item pipeline that writes scraped book items to an Excel file.

    Rows are accumulated in an in-memory openpyxl workbook during the crawl
    and saved to a date-stamped ``.xlsx`` file when the spider closes.
    """

    def __init__(self):
        print('pipline init.............')
        super().__init__()
        # Workbook lives in memory for the whole crawl; it is only written
        # to disk once, in close_spider().
        self.wb = openpyxl.Workbook()
        self.ws = self.wb.active
        # Header row (Chinese column names): title, publish info, rating.
        self.ws.append(['书名', '出版信息', '评分'])

    def process_item(self, book, spider):
        """Append one scraped book to the worksheet.

        ``book`` is expected to be dict-like with 'title', 'publish' and
        'rate' keys — presumably a scrapy Item; verify against the spider.

        Returns the item unchanged, as required by the Scrapy pipeline
        contract so that any later pipelines still receive it.
        """
        self.ws.append([book['title'], book['publish'], book['rate']])
        # Bug fix: the item must be returned; otherwise downstream
        # pipelines in ITEM_PIPELINES would receive None.
        return book

    def close_spider(self, spider):
        """Save the workbook to a date-stamped xlsx file and release it."""
        current_date = datetime.now().strftime('%Y-%m-%d')
        self.wb.save(filename=f'douban-spider_{current_date}.xlsx')
        self.wb.close()
