# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql


class DoubanfilePipeline:
    """Persist items scraped by the 'db' spider into a local MySQL table.

    The pipeline is a no-op for every other spider in the project, so a
    single Scrapy project can host multiple spiders without all of them
    paying for (or requiring) the MySQL setup.
    """

    db = None        # pymysql connection, created only for the 'db' spider
    cursor = None    # cursor on that connection

    def open_spider(self, spider):
        """Open the MySQL connection when the 'db' spider starts."""
        if spider.name == 'db':
            self.db = pymysql.connect(host='127.0.0.1', port=3306, db='douban',
                                      user='root', passwd='1234',
                                      charset='utf8')
            self.cursor = self.db.cursor()

    def process_item(self, item, spider):
        """Insert one item into the `douban` table; always pass the item on.

        Returns the item on every path (not just on error) so that any
        pipeline registered after this one still receives it.
        """
        if spider.name == 'db':
            # Parameterized query: the driver escapes the values, which
            # prevents SQL injection and avoids breakage when scraped text
            # contains quotes. Never interpolate scraped data into SQL.
            sql = 'insert into douban(img_src, name, to_star) values(%s, %s, %s)'
            try:
                self.cursor.execute(
                    sql, (item['img_src'], item['name'], item['to_star']))
                self.db.commit()
            except Exception as e:
                # Best-effort logging + rollback; a bad row should not
                # abort the whole crawl.
                print(e)
                print(sql)
                self.db.rollback()
        return item

    def close_spider(self, spider):
        """Close the DB connection when the spider finishes.

        Scrapy invokes `close_spider` (the previous name `close_item` was
        never called, leaking the connection). Guarded because the
        connection only exists when the 'db' spider ran.
        """
        if self.db is not None:
            self.cursor.close()
            self.db.close()
