# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import urllib.request
import os
import pymysql

class DoubanPipeline:
    """Scrapy item pipeline that persists scraped book items into MySQL.

    Holds one PyMySQL connection for the lifetime of the spider
    (``open_spider`` / ``close_spider``) instead of reconnecting for
    every item, and inserts each item as one row into ``app_book``.
    """

    def open_spider(self, spider):
        # One connection per crawl; reconnecting per item is wasteful.
        # NOTE(review): credentials are hardcoded — consider moving them to
        # Scrapy settings (e.g. MYSQL_HOST / MYSQL_PASSWORD) instead.
        self.connection = pymysql.connect(host='localhost', user='root', password='jykjyk2017', database='topbook', charset='utf8')

    def close_spider(self, spider):
        # Release the connection when the crawl finishes.
        self.connection.close()

    def process_item(self, item, spider):
        """Insert one book row into ``app_book`` and return the item.

        Expects the item to carry the keys: title, score, publish_detail,
        slogan, publish_house, img_src, detail_addr, comment_people.
        On a database error the transaction is rolled back and the error
        is logged, so one bad row does not abort the whole crawl.
        """
        info = [item['title'], item['score'], item['publish_detail'], item['slogan'], item['publish_house'], item['img_src'], item['detail_addr'], item['comment_people']]
        # Parameterized query: pymysql escapes the values, so scraped text
        # cannot inject SQL.
        sql = 'insert into app_book (title, score, publish_detail, slogan, publish_house, img_src, detail_addr, comment_people) values (%s, %s, %s, %s, %s, %s, %s, %s)'
        try:
            with self.connection.cursor() as cursor:
                affectedcount = cursor.execute(sql, info)
                print('成功修改{0}条数据'.format(affectedcount))
            self.connection.commit()
        except pymysql.DatabaseError:
            self.connection.rollback()
            # Log the failure instead of silently swallowing it.
            spider.logger.exception('failed to insert item into app_book')
        return item

