# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
import scrapy
from scrapy.pipelines.images import ImagesPipeline


from scrapy.exceptions import DropItem
import os

class AiHuiShouPipeline(object):
    """Persist scraped goods items into a MySQL `goods` table via PyMySQL.

    Expects items with 'name', 'price' and 'brand' keys; any missing key
    is stored as the literal string "N/A". Connection settings are read
    from the spider's Scrapy settings in open_spider().
    """

    def process_item(self, item, spider):
        """Insert one item into the `goods` table and pass it downstream.

        Returns the item unchanged so later pipeline stages still see it.
        """
        price = item.get("price", "N/A")
        name = item.get("name", "N/A")
        brand = item.get('brand', "N/A")
        # Parameterized query: the driver escapes the values, never build
        # this string by concatenation.
        sql = "insert into goods(name, price,brand ) VALUES (%s, %s,%s)"
        self.db_cur.execute(sql, (name, price, brand))
        self.db_conn.commit()
        return item

    # Open the database when the spider starts.
    def open_spider(self, spider):
        """Read MySQL settings and open the connection + cursor."""
        db = spider.settings.get('MYSQL_DB_NAME', 'ai_hui_shou')
        host = spider.settings.get('MYSQL_HOST', 'localhost')
        # getint coerces string overrides (env / -s CLI options) to int;
        # pymysql requires an int port.
        port = spider.settings.getint('MYSQL_PORT', 3306)
        user = spider.settings.get('MYSQL_USER', 'root')
        # NOTE(review): hard-coded password default — move this secret to
        # settings/environment only.
        passwd = spider.settings.get('MYSQL_PASSWORD', '199326')
        # NOTE(review): MySQL 'utf8' is the 3-byte subset; consider
        # 'utf8mb4' if items may contain emoji — confirm table charset first.
        self.db_conn = pymysql.connect(host=host, port=port, db=db,
                                       user=user, passwd=passwd,
                                       charset='utf8')
        self.db_cur = self.db_conn.cursor()
        print('数据库打开')

    # Close the database when the spider finishes.
    def close_spider(self, spider):
        """Flush any pending work and release DB resources on shutdown."""
        self.db_conn.commit()
        # Close the cursor before the connection (was leaked before).
        self.db_cur.close()
        self.db_conn.close()

# Image-download pipeline: fetches item['img'] and stores the file named
# after item['name'].
class ImagesPipeline(ImagesPipeline):
    """Download each item's image and store it as full/<name>.jpg.

    NOTE(review): this class shadows the imported scrapy ImagesPipeline
    base. It still works (the base name is resolved when the class is
    defined), but a distinct name would be clearer; left unchanged because
    ITEM_PIPELINES may reference this exact dotted path.
    """

    def get_media_requests(self, item, info):
        """Yield one download request for the item's image URL.

        The item name travels in request.meta so file_path() can name the
        stored file after it.
        """
        yield scrapy.Request(item['img'], meta={'name': item['name']})

    def item_completed(self, results, item, info):
        """Drop items whose image download failed; pass the rest through."""
        image_paths = [x['path'] for ok, x in results if ok]
        if not image_paths:
            raise DropItem("Item contains no images")
        return item

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the store path full/<name>.jpg for a downloaded image.

        `item` is accepted keyword-only for newer Scrapy versions that pass
        it; it is unused here. The scraped name is sanitized because path
        separators in scraped data would otherwise create nested
        directories (or escape the image store root).
        """
        name = request.meta['name']
        safe = name.replace('/', '_').replace('\\', '_').replace(os.sep, '_')
        return 'full/%s.jpg' % safe


