# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql

'''------------------------------- NetEase News  pipelines.py -----------------------------------'''
class ScrapyStudy2Pipeline:
    """Persist scraped NetEase news items into the MySQL table ``wynews``.

    The database connection is opened once per crawl (``open_spider``) and
    closed when the spider finishes (``close_spider``), instead of the
    original per-item connect that was never closed (connection leak).
    """

    def open_spider(self, spider):
        # One connection for the whole crawl.
        # NOTE(review): credentials are hard-coded — consider moving them to
        # settings.py / environment variables.
        # pymysql.connect requires keyword arguments; the original bare
        # positional call is not supported by current PyMySQL.
        self.db = pymysql.connect(
            host="192.168.1.99",
            user="root",
            password="1",
            database="local",
            charset="utf8mb4",
        )

    def close_spider(self, spider):
        # Release the connection when the crawl ends.
        self.db.close()

    def process_item(self, item, spider):
        """Insert one item's title/content row; return the item so later
        pipelines still receive it (the original returned None)."""
        adapter = ItemAdapter(item)
        # Parameterized query: the original built the SQL by string
        # concatenation with broken quoting (the literal text
        # ``+it['title']+`` ended up inside the statement) and was open to
        # SQL injection. Placeholders fix both.
        sql = "insert into wynews (title,content) values (%s,%s)"
        try:
            with self.db.cursor() as cursor:
                cursor.execute(sql, (adapter.get("title"), adapter.get("content")))
            self.db.commit()
        except pymysql.MySQLError:
            # Roll back this item's insert but keep the spider running;
            # log instead of silently swallowing (original bare ``except:``).
            self.db.rollback()
            spider.logger.exception("failed to insert item into wynews")
        return item


