# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import pymysql
from .items import QvanshuwangItem, ContentItem
import logging
from scrapy.exceptions import DropItem
logger = logging.getLogger(__name__)


class QvanshuwangPipeline(object):
    """Item pipeline that persists scraped novels to MySQL.

    Connection parameters come from the ``DATABASE_CONFIG`` setting.
    ``QvanshuwangItem`` instances become rows in the ``novel`` and
    ``chapter`` tables; ``ContentItem`` instances fill in chapter text.
    """

    def open_spider(self, spider):
        """Open the MySQL connection once when the spider starts."""
        data_config = spider.settings["DATABASE_CONFIG"]  # configured in settings.py
        if data_config["type"] == "mysql":
            print("建立连接")
            self.conn = pymysql.connect(**data_config["config"])  # connect to the database
            self.cursor = self.conn.cursor()  # create a cursor
            # Expose the connection/cursor on the spider so other
            # components (e.g. the spider itself) can reuse them.
            spider.conn = self.conn
            spider.cursor = self.cursor

    def process_item(self, item, spider):
        """Dispatch each item to the matching storage routine.

        :param item: ``QvanshuwangItem`` (novel + chapter list) or
            ``ContentItem`` (chapter body text).
        :raises DropItem: for any other item type.
        :returns: the item, unchanged, for downstream pipelines.
        """
        if isinstance(item, QvanshuwangItem):
            self._store_novel(item)
            return item
        elif isinstance(item, ContentItem):
            self._store_content(item)
            return item
        else:
            # BUG FIX: the original *returned* the DropItem class, which
            # silently passed the item on; dropping requires raising it.
            raise DropItem("unexpected item type: %s" % type(item).__name__)

    def _store_novel(self, item):
        """Insert the novel row (if not already stored) and its chapters."""
        # Check for an existing row first so re-crawls don't duplicate novels.
        sql = "select id from novel where book_name=%s and author=%s"
        self.cursor.execute(sql, (item["book_name"], item["author"]))
        if self.cursor.fetchone():
            return

        try:
            sql = "insert into novel (category,author,book_name,status,description,c_time,url)" \
                  "values(%s,%s,%s,%s,%s,%s,%s)"
            self.cursor.execute(sql, (
                item['category'],
                item['author'],
                item['book_name'],
                item['status'],
                item['description'],
                item['c_time'],
                item['url']
            ))
            novel_id = self.cursor.lastrowid  # foreign key for the chapter rows
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            logger.warning("小说信息错误   url=%s %s" % (item["url"], e))
            # BUG FIX: the original fell through to the chapter insert even
            # after this failure, crashing with a NameError because
            # ``novel_id`` was never bound. Without a novel row there is
            # nothing to attach chapters to, so stop here.
            return

        try:
            sql = "insert into chapter (novel_id,title,ordernum,c_time,url)values(%s,%s,%s,%s,%s)"
            # Chapter lists are large, so collect the rows and insert them
            # in a single executemany batch instead of one-by-one.
            data_list = []
            for index, chapter in enumerate(item["chapter_list"]):
                title, url = chapter  # each chapter is a (title, url) tuple
                data_list.append((novel_id, title, index, item["c_time"], url))
            self.cursor.executemany(sql, data_list)
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            logger.warning("小说章节信息错误  url=%s %s" % (item["url"], e))

    def _store_content(self, item):
        """Fill in a chapter's body text, matched by the chapter URL."""
        try:
            sql = "update chapter set content=%s where url=%s"
            self.cursor.execute(sql, (item["content"], item["url"]))
            self.conn.commit()
        except Exception as e:
            self.conn.rollback()
            logger.warning("小说内容信息错误  url=%s %s" % (item["url"], e))

    def close_spider(self, spider):
        """Release the cursor and connection when the spider finishes."""
        data_config = spider.settings["DATABASE_CONFIG"]  # configured in settings.py
        if data_config["type"] == "mysql":
            # BUG FIX: message previously said "建立连接" (opening a
            # connection) although this hook closes it.
            print("关闭连接")
            self.cursor.close()
            self.conn.close()