# -*- coding: utf-8 -*-
import pymysql
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html


class SinaPipeline(object):
    """Filter pipeline: discard any item that has no image URL."""

    def process_item(self, item, spider):
        """Drop items whose ``imgUrl`` field is None; pass everything else through.

        Raises:
            DropItem: when the item carries no image URL.
        """
        # `is None` is the correct identity test; `== None` relies on the
        # item's __eq__ and is non-idiomatic (PEP 8).
        if item["imgUrl"] is None:
            raise DropItem("drop item")
        return item

class MysqlPipeline(object):
    """Persist scraped book items into the MySQL ``book`` table via pymysql."""

    def __init__(self, host, database, user, password, port):
        self.host = host
        self.user = user
        self.password = password
        self.port = port
        self.database = database

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor: pull connection parameters from the
        crawler's ``MYSQL_*`` settings."""
        return cls(
            host=crawler.settings.get("MYSQL_HOST"),
            user=crawler.settings.get("MYSQL_USER"),
            password=crawler.settings.get("MYSQL_PASSWORD"),
            port=crawler.settings.get("MYSQL_PORT"),
            database=crawler.settings.get("MYSQL_DATABASE"),
        )

    def open_spider(self, spider):
        """Open the database connection when the spider starts."""
        print("连接数据库", self.host, self.user, self.password, self.database, self.port)
        # BUG FIX: pymysql >= 1.0 removed the positional form
        # connect(host, user, password, database); keyword arguments are
        # required, and they are clearer in any version.
        self.db = pymysql.connect(
            host=self.host,
            user=self.user,
            password=self.password,
            database=self.database,
            charset="utf8",
            port=self.port,
        )
        self.cursor = self.db.cursor()

    def process_item(self, item, spider):
        """Insert one item into the ``book`` table.

        Uses a parameterized query (placeholders, not string formatting),
        which protects against SQL injection. Rolls back on failure so the
        connection is not left mid-transaction, then re-raises.
        """
        sql = "insert into book(title,bookUrl,price,price_pre,discount,author,date,place,num,imgName,imgUrl,intro,dataindex) " \
              "values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        param = (item["title"], item["bookUrl"], item["price"], item["price_pre"],
                 item["discount"], item["author"], item["date"], item["place"],
                 item["num"], item["imgName"], item["imgUrl"], item["intro"],
                 item["dataindex"])
        try:
            self.cursor.execute(sql, param)
            self.db.commit()
        except Exception:
            # Keep the connection usable for subsequent items / surface the error.
            self.db.rollback()
            raise
        return item

    def close_spider(self, spider):
        """Release the cursor and close the connection when the spider stops."""
        self.cursor.close()
        self.db.close()


class ImagePipeline(ImagesPipeline):
    """Custom image-storage pipeline: downloads each item's image and
    records the saved file name back onto the item."""

    def get_media_requests(self, item, info):
        """Schedule a download request for the item's image URL."""
        yield Request(item["imgUrl"])

    def file_path(self, request, response=None, info=None):
        """Name the stored file after the last path segment of the URL.

        Without this override scrapy would pick an opaque unique name.
        """
        return request.url.rsplit("/", 1)[-1]

    def item_completed(self, results, item, info):
        """After download, attach the stored path to the item.

        Raises:
            DropItem: when no image was successfully downloaded.
        """
        saved = [info_dict["path"] for ok, info_dict in results if ok]
        if not saved:
            raise DropItem("Item contains no images")
        item["imgName"] = saved[0]
        return item