# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

import pymysql

class JdPipeline(object):
    """Scrapy item pipeline that persists crawled JD product items into MySQL.

    Opens one pymysql connection per spider run, inserts every processed
    item as a row of the `京东手机数据` table, and commits/closes on spider
    shutdown.  Uses a parameterized INSERT so values containing quotes,
    backslashes or NULLs are handled safely (the previous str.format-based
    query was SQL-injection-prone and produced invalid SQL for such values).
    """

    # Item keys read from each item, in the exact order of the INSERT
    # columns below.  These key names are the contract with the spider —
    # do not rename them (including the 'weigth' typo).
    _FIELDS = (
        "crawl_time",   # crawl timestamp
        "goodsID",      # product ID
        "goodsName",    # product name
        "ziying",       # whether JD self-operated
        "goodsBrand",   # brand
        "goodsBelong",  # model / designation
        "weigth",       # weight (key name kept as spelled by the spider)
        "makein",       # place of origin
        "age",          # launch year/month
        "price",        # price
        "goodsSelect",  # selectable configurations
        "goodsLink",    # product URL
        "shopID",       # shop ID
        "shopName",     # shop name
        "shopLink",     # shop URL
    )

    # Parameterized INSERT; one %s placeholder per field above.
    _SQL = (
        "INSERT INTO `京东手机数据`"
        "(`采集时间`,`商品ID`,`商品名称`,`是否自营`,`品牌`,`型号`,`重量`,`产地`,"
        "`上市年月`,`价格`,`可选配置`,`商品链接`,`店铺ID`,`店铺名`,`店铺链接`)"
        " VALUES (" + ",".join(["%s"] * 15) + ")"
    )

    def open_spider(self, spider):
        """Open the MySQL connection and cursor when the spider starts."""
        # Keyword arguments + utf8mb4 so the Chinese database/table/column
        # names and item values round-trip correctly.
        self.conn = pymysql.connect(
            host="127.0.0.1",
            user="root",
            password="pwd",
            database="京东商品数据",
            charset="utf8mb4",
        )
        self.cur = self.conn.cursor()

    def close_spider(self, spider):
        """Flush any pending work and release DB resources on shutdown."""
        self.conn.commit()
        self.cur.close()
        self.conn.close()

    def process_item(self, item, spider):
        """Insert one item as a row; commit immediately; return the item.

        Raises KeyError if the item is missing any expected field, which
        surfaces spider bugs instead of silently inserting bad rows.
        """
        values = tuple(item[field] for field in self._FIELDS)
        # Let the driver escape values — never interpolate them into SQL.
        self.cur.execute(self._SQL, values)
        self.conn.commit()
        return item
