# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql


def field_is_None(field):
    """Map a missing (None) item field to the string 'NULL'.

    Kept for backward compatibility with callers that splice values
    into raw SQL text; note it returns the *string* 'NULL', not SQL NULL.

    :param field: any scraped field value, possibly None
    :return: ``'NULL'`` when *field* is None, otherwise *field* unchanged
    """
    # Identity check: `is None` is the correct idiom, `== None` can be
    # fooled by objects overriding __eq__.
    if field is None:
        return 'NULL'
    return field

class DbTushuPipeline:
    """Persist scraped book items into the ``book`` table of ``db_tushu``.

    Fixes over the previous version:
    - one connection per spider run (``open_spider``/``close_spider``)
      instead of one per item;
    - parameterized SQL instead of string concatenation (the old query
      was injectable and crashed on any value containing a quote);
    - explicit ``commit()`` — pymysql does not autocommit by default,
      so the old inserts were silently rolled back;
    - None/missing fields are sent as real SQL NULL rather than the
      literal string ``'NULL'``.
    """

    # Columns inserted, in the order expected by the SQL statement below.
    FIELDS = ('title', 'author', 'binding', 'publisher', 'price',
              'pages', 'pubdate', 'isbn', 'summary', 'image')

    def open_spider(self, spider):
        # Scrapy calls this once when the spider starts.
        self.conn = pymysql.connect(host='127.0.0.1', user='root',
                                    password='root', database='db_tushu')

    def close_spider(self, spider):
        # Scrapy calls this once when the spider finishes.
        self.conn.close()

    def process_item(self, item, spider):
        """Insert one scraped book item and return it unchanged."""
        adapter = ItemAdapter(item)
        # .get() returns None for absent fields; pymysql converts
        # None to SQL NULL, so no manual 'NULL' substitution is needed.
        values = [adapter.get(field) for field in self.FIELDS]
        sql = ("insert into book(title, author, binding, publisher, price, "
               "pages, pubdate, isbn, summary, image) "
               "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
        # Parameterized execution: the driver escapes every value.
        with self.conn.cursor() as cursor:
            cursor.execute(sql, values)
        self.conn.commit()
        return item



