# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface

from itemadapter import ItemAdapter
import pymysql


class FirstspiderPipeline:
    """Scrapy item pipeline that persists scraped items into MySQL.

    Opens a pymysql connection when the spider starts, inserts one row per
    item into the ``jandan`` table, and closes the connection when the
    spider finishes.
    """

    def __init__(self):
        # Connection and cursor are created in open_spider, so the pipeline
        # can be constructed without touching the database.
        self.conn = None
        self.cursor = None

    def process_item(self, item, spider):
        """Insert one scraped item into the ``jandan`` table.

        :param item: item yielded by the spider; must provide 'url' and
                     'title' keys.
        :param spider: the spider instance that produced the item.
        :return: the item, unchanged, so later pipelines can process it.
        """
        # Parameterized query — values are never interpolated into the SQL
        # string, so item content cannot inject SQL.
        sql = "insert into jandan(url,title) values(%s,%s)"
        try:
            ret = self.cursor.execute(sql, [item['url'], item['title']])
            self.conn.commit()
            print(ret)
        except Exception as e:
            # Best-effort insert: roll back the failed transaction so the
            # connection stays usable for subsequent items, then report.
            self.conn.rollback()
            print(e)

        return item

    @classmethod
    def from_crawler(cls, crawler):
        """Factory hook Scrapy calls to build the pipeline instance.

        NOTE(review): this was originally misspelled ``from_crawl``, so
        Scrapy never invoked it and fell back to plain instantiation; the
        correct hook name is ``from_crawler``. Behavior is unchanged
        because the method just returns ``cls()``.

        :param crawler: the Crawler object, giving access to settings.
        :return: a new pipeline instance.
        """
        # Settings could be read here if needed, e.g.:
        # val = crawler.settings.getint('MMM')
        # return cls(val)
        return cls()

    def open_spider(self, spider):
        """Called once when the spider opens; establish the DB connection."""
        print('---------开启爬虫---------')
        self.conn = pymysql.connect(
            host="localhost",
            port=3306,
            database="db1",
            user="root",
            password="123456",
            charset="utf8"
        )
        self.cursor = self.conn.cursor()

    def close_spider(self, spider):
        """Called once when the spider closes; release DB resources."""
        print('--------关闭爬虫--------')
        self.cursor.close()
        self.conn.close()
