# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql

from scrapy.http import Request
from scrapy.pipelines.images import ImagesPipeline

class BqgSqlPipeline:
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to persist here; hand the item to the next pipeline stage.
        return item


# Persist novel information (category, author, book, cover, chapters) to MySQL
class MysqlPipeline:
    """Persist novel metadata (category, author, book, cover image, chapters)
    into a MySQL database via PyMySQL.

    All SQL *values* are bound as query parameters — never interpolated into
    the SQL string — to prevent SQL injection from scraped page content.
    """

    def open_spider(self, spider):
        """Open the database connection and cursor when the spider starts."""
        # NOTE(review): 'utf8' is MySQL's 3-byte subset; consider 'utf8mb4'
        # if titles may contain emoji or rare CJK characters — confirm schema.
        self.con = pymysql.connect(host='localhost', port=3306, user='root',
                                   password='shen', db='noval_spider', charset='utf8')
        self.cursor = self.con.cursor()

    def _get_or_create_id(self, table, column, value):
        """Return the id of *value* in *table*, inserting it first if absent.

        *table* and *column* are trusted constants from this module, so they
        may be formatted into the SQL; *value* is scraped data and is always
        bound as a parameter.
        """
        self.cursor.execute(f'SELECT id FROM {table} WHERE {column} = %s;', (value,))
        row = self.cursor.fetchone()
        if row is not None:
            # fetchone() yields a 1-tuple; unwrap to the bare id.
            return row[0]
        self.cursor.execute(f'INSERT INTO {table} VALUES (null, %s);', (value,))
        # Commit the insert before relying on the generated key.
        self.con.commit()
        return self.cursor.lastrowid

    def _insert_chapters(self, titles, book_id):
        """Insert every chapter title for the book identified by *book_id*."""
        for title in titles or []:
            self.cursor.execute('INSERT INTO booksdetail VALUES (null, %s, %s);',
                                (title, book_id))
        self.con.commit()

    def process_item(self, item, spider):
        """Store one scraped item.

        Items carrying "imgurl" come from a book's index page and hold the
        full metadata (category, author, book name, cover, chapter titles);
        other items hold only the book name plus further chapter titles.

        Returns the item so later pipeline stages still receive it (required
        by Scrapy's item-pipeline contract).
        """
        img = item.get("imgurl")
        noval_name = item.get("noval_name")
        noval_titles = item.get("noval_titles")

        if img:
            # Index-page item: resolve (or create) the category/author rows,
            # then insert the book with both foreign keys.
            c_id = self._get_or_create_id('category', 'category', item.get("category"))
            a_id = self._get_or_create_id('author', 'author', item.get("author"))

            self.cursor.execute('INSERT INTO noval_name VALUES (null, %s, %s, %s);',
                                (noval_name, a_id, c_id))
            self.con.commit()
            # Id of the book row just inserted. (Previously re-queried by
            # name, and the raw 1-tuple — not the id — was stored as the FK.)
            bid = self.cursor.lastrowid

            # Record where the cover image is stored: images/books_pic/<img>.
            image_path = 'images/books_pic/' + img
            self.cursor.execute('INSERT INTO books_image VALUES (null, %s, %s);',
                                (image_path, bid))
            self.con.commit()
        else:
            # Chapter-only item: look up the already-inserted book by name.
            self.cursor.execute('SELECT id FROM noval_name WHERE noval_name = %s;',
                                (noval_name,))
            row = self.cursor.fetchone()
            bid = row[0] if row is not None else None

        self._insert_chapters(noval_titles, bid)
        return item

    def close_spider(self, spider):
        """Release the cursor and connection when the spider finishes."""
        self.cursor.close()
        self.con.close()






# Download and save novel cover images
class novalImgPipline(ImagesPipeline):
    """Download book cover images, saving each under its scraped file name."""

    def get_media_requests(self, item, info):
        """Schedule the cover download, carrying the target file name in
        request.meta so file_path() can retrieve it on completion."""
        name = item.get('img_name')
        # Removed leftover debug print of a separator line.
        return Request(item.get('imgurl'), meta={'name': name})

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the storage path (relative to IMAGES_STORE): exactly the
        name chosen in get_media_requests."""
        return request.meta.get('name')




