# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
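#
# For example, the pipeline below can be enabled in settings.py roughly like
# this (assuming the Scrapy project is named crawl_jd; adjust the module path
# to match your own project):
#
#     ITEM_PIPELINES = {
#         "crawl_jd.pipelines.CrawlJdPipeline": 300,
#     }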


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

import pymysql

class CrawlJdPipeline:
    """Writes scraped JD review items to a MySQL table via pymysql."""

    def open_spider(self, spider):
        # Open the MySQL connection once when the spider starts.
        self.con = pymysql.connect(host='10.30.56.96', port=3306, user='root',
                                   password='123456', database='wdr', charset='utf8')
        self.cur = self.con.cursor()

    def process_item(self, item, spider):
        # Insert one row per review item and commit immediately.
        self.cur.execute(
            'insert into jd_crawl (referenceName, productSize, productColor, creationTime, referenceTime, content)'
            ' values (%s, %s, %s, %s, %s, %s)',
            [item['referenceName'], item['productSize'], item['productColor'],
             item['creationTime'], item['referenceTime'], item['content']])
        self.con.commit()
        return item

    def close_spider(self, spider):
        # Close the connection when the spider finishes.
        self.con.close()
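
# The INSERT in process_item assumes a `jd_crawl` table already exists in the
# `wdr` database. A minimal sketch of a matching schema (column types are
# assumptions, not taken from the project):
#
#     CREATE TABLE jd_crawl (
#         referenceName VARCHAR(255),
#         productSize   VARCHAR(64),
#         productColor  VARCHAR(64),
#         creationTime  VARCHAR(32),
#         referenceTime VARCHAR(32),
#         content       TEXT
#     ) DEFAULT CHARSET = utf8;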
