# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter




class SearchblogsPipeline:
    """Default no-op pipeline: forwards every scraped item unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; hand the item to the next pipeline stage.
        return item

# # Export item data to a CSV file (stub, not implemented).
# class  CsvItemExporterPipeline(object):
#     def __init__(self, *args):

### Save items into the MySQL database

import pymysql
from searchBlogs.items import SearchblogsItem
class MySqlpipeline(object):
    """Persist each scraped item as a row in the MySQL `blogs`.`blog` table.

    The connection is opened once per crawl (in ``open_spider``) and closed
    when the spider finishes, instead of reconnecting for every item.
    """

    def open_spider(self, spider):
        # One connection for the whole crawl; reconnecting per item is slow
        # and wastes server resources.
        self.conn = pymysql.connect(
            host="localhost",
            user="root",
            passwd="root1234",
            database="blogs",
            charset="utf8",
            cursorclass=pymysql.cursors.DictCursor)

    def close_spider(self, spider):
        # Release the connection when the crawl ends.
        self.conn.close()

    def process_item(self, item, spider):
        """Insert the item's fields into the `blog` table and pass it on.

        Uses a parameterized query so field values are safely escaped.
        Returns the item so later pipelines in ITEM_PIPELINES still run.
        """
        insert_sql = """
            insert into `blog`(`title`,`content`,`pic`,`link`,`date`)value(%s,%s,%s,%s,%s)
        """
        params = (
            item.get("title"),
            item.get("content"),
            item.get("pic"),
            item.get("link"),
            item.get("date"),
        )
        # The cursor is a context manager; do NOT use `with self.conn:` —
        # on modern PyMySQL that closes the connection on exit, which would
        # break subsequent items now that the connection is long-lived.
        with self.conn.cursor() as cursor:
            cursor.execute(insert_sql, params)
        self.conn.commit()
        # Bug fix: the original never returned the item, so any pipeline
        # configured after this one would receive None.
        return item