# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import csv
import logging

import pymysql
import redis
from openpyxl import Workbook
from pymongo import MongoClient
from scrapy import Item
from scrapy import Request
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
from twisted.enterprise import adbapi


"""————————————————————数据清洗————————————————————"""
# Convert the price from GBP to CNY
class PriceConverterPipeline(object):
    """Rewrite item['price'] from a '£xx.xx' string into a '￥xx.xx'
    string using a fixed GBP -> CNY exchange rate."""

    exchange_rate = 8.5309

    def process_item(self, item, spider):
        # Drop the leading currency symbol, convert, then reformat.
        amount_gbp = float(item['price'][1:])
        item['price'] = '￥%.2f' % (amount_gbp * self.exchange_rate)
        return item


# Convert the English rating word into a number
class RateConverterPipeline(object):
    """Replace a rating string such as 'star-rating Three' in
    item['rate'] with the integer 3; unrecognised words are left
    unchanged."""

    def process_item(self, item, spider):
        word_to_num = {'zero': 0, 'one': 1, 'two': 2,
                       'three': 3, 'four': 4, 'five': 5}
        # The rating word is the last space-separated token.
        word = item['rate'].split(' ')[-1].lower()
        if word in word_to_num:
            item['rate'] = word_to_num[word]
        return item


# Normalise the stock-availability text into a boolean
class AvailabilityPipeline(object):
    """Set item['availability'] to True when the third line (index 2)
    of the scraped availability text equals 'In stock', else False."""

    def process_item(self, item, spider):
        lines = item['availability'].split('\n')
        item['availability'] = lines[2].strip() == "In stock"
        return item


# Drop duplicate books
class DuplicatesPipeline(object):
    """Track every book name seen so far and drop any later item that
    carries a name already processed."""

    def __init__(self):
        self.book_set = set()

    def process_item(self, item, spider):
        book_name = item['name']
        if book_name not in self.book_set:
            self.book_set.add(book_name)
            return item
        raise DropItem("Duplicate book found: %s" % item)


"""————————————————————数据保存————————————————————"""
# Save book records as semicolon-delimited CSV
class CSVPipeline(object):
    """Append each scraped book as one row of books_message.csv.

    Uses csv.writer so that fields containing the ';' delimiter, quote
    characters or embedded newlines are escaped correctly — the
    original manual string concatenation produced a corrupt file for
    any such value.
    """

    def process_item(self, item, spider):
        row = [item['name'], item['price'], item['rate'],
               item['availability'], item['img_url']]
        # newline='' lets the csv module manage line endings itself.
        with open('books_message.csv', 'a', encoding='utf-8', newline='') as f:
            csv.writer(f, delimiter=';').writerow(row)
        return item


# Save book records as an Excel workbook
class ExcelPipeline(object):
    """Collect scraped books into an in-memory worksheet and write
    books_message.xlsx once when the spider closes.

    Saving once in close_spider replaces the original save-per-item,
    which rewrote the entire workbook to disk for every scraped book
    (O(n^2) total I/O over the crawl).
    """

    def __init__(self):
        self.wb = Workbook()
        self.ws = self.wb.active
        # Header row: book name, price, rating, in stock, image URL.
        self.ws.append(['书名', '价格', '评价等级', '是否有库存', '图片地址'])

    def process_item(self, item, spider):
        self.ws.append([item['name'], item['price'], item['rate'],
                        item['availability'], item['img_url']])
        return item

    def close_spider(self, spider):
        # Persist the workbook a single time, after the last item.
        self.wb.save('books_message.xlsx')


# Save data into a MySQL database
class MySQLPipeline():
    """Synchronous MySQL storage pipeline.

    Connection parameters are read from the project settings
    (MYSQL_HOST / MYSQL_PORT / MYSQL_DATABASE / MYSQL_USER /
    MYSQL_PASSWORD) when the spider opens.
    """

    def open_spider(self, spider):
        host = spider.settings.get('MYSQL_HOST')
        # Settings values may arrive as strings (e.g. via -s on the
        # command line); pymysql requires an int port.
        port = int(spider.settings.get('MYSQL_PORT'))
        database = spider.settings.get('MYSQL_DATABASE')
        user = spider.settings.get('MYSQL_USER')
        password = spider.settings.get('MYSQL_PASSWORD')
        self.db_connect = pymysql.connect(host=host, port=port,
                                          database=database, user=user,
                                          password=password, charset='utf8')
        self.cursor = self.db_connect.cursor()

    def close_spider(self, spider):
        # Flush anything outstanding, then release both resources
        # (the original never closed the cursor).
        self.db_connect.commit()
        self.cursor.close()
        self.db_connect.close()

    def process_item(self, item, spider):
        self.insert_db(item)
        return item

    def insert_db(self, item):
        """Insert one book row and commit immediately, so a crash
        mid-crawl does not lose every previously scraped row (the
        original committed only in close_spider)."""
        values = (
            item['name'],
            item['price'],
            item['rate'],
            item['availability'],
            item['img_url'],
        )
        sql = 'INSERT INTO books VALUES (%s,%s,%s,%s,%s)'
        self.cursor.execute(sql, values)
        self.db_connect.commit()


# Save data into MySQL asynchronously via Twisted's adbapi pool
class MySQLAsyncPipeline():
    """Asynchronous MySQL storage pipeline (twisted.enterprise.adbapi).

    Reads the same MYSQL_* settings as MySQLPipeline; inserts run on
    the pool's worker threads so the crawl is not blocked by the DB.
    """

    def open_spider(self, spider):
        host = spider.settings.get('MYSQL_HOST')
        # Coerce to int: settings values may arrive as strings.
        port = int(spider.settings.get('MYSQL_PORT'))
        database = spider.settings.get('MYSQL_DATABASE')
        user = spider.settings.get('MYSQL_USER')
        password = spider.settings.get('MYSQL_PASSWORD')
        self.dbpool = adbapi.ConnectionPool('pymysql', host=host, port=port,
                                            db=database, user=user,
                                            password=password, charset='utf8')

    def close_spider(self, spider):
        self.dbpool.close()

    def process_item(self, item, spider):
        deferred = self.dbpool.runInteraction(self.insert_db, item)
        # The original discarded the Deferred, so any database error
        # was silently swallowed; log failures instead.
        deferred.addErrback(self._handle_error, item)
        return item

    def _handle_error(self, failure, item):
        # Errback: record which item failed and why.
        logging.error('MySQL insert failed for %r: %s', item, failure)

    def insert_db(self, tx, item):
        """Run inside a pool transaction; adbapi commits on success."""
        values = (
            item['name'],
            item['price'],
            item['rate'],
            item['availability'],
            item['img_url'],
        )
        sql = 'INSERT INTO books VALUES (%s,%s,%s,%s,%s)'
        tx.execute(sql, values)


# Save data into a MongoDB database
class MongoDBPipeline():
    """MongoDB storage pipeline.

    Reads MONGODB_URI / MONGODB_DB_NAME from the project settings and
    writes every item into the 'books' collection.
    """

    def open_spider(self, spider):
        db_uri = spider.settings.get('MONGODB_URI')
        db_name = spider.settings.get('MONGODB_DB_NAME')
        self.db_client = MongoClient(db_uri)
        self.db = self.db_client[db_name]  # select the target database

    def close_spider(self, spider):
        self.db_client.close()

    def process_item(self, item, spider):
        self.insert_db(item)
        return item

    def insert_db(self, item):
        # insert_one mutates the mapping it is given (it injects an
        # '_id' field).  The original copied only scrapy Items, so a
        # plain-dict item was contaminated with '_id' for every later
        # pipeline; always insert a copy instead (dict() accepts both
        # plain dicts and scrapy Items).
        self.db.books.insert_one(dict(item))


# Save data into a Redis database
class RedisPipeline:
    """Redis storage pipeline.

    Reads REDIS_HOST / REDIS_PORT / REDIS_DB_INDEX from the project
    settings and stores each item as a hash under 'book:<n>', where n
    is a per-run counter.
    """

    def open_spider(self, spider):
        db_host = spider.settings.get('REDIS_HOST')
        db_port = spider.settings.get('REDIS_PORT')
        db_index = spider.settings.get('REDIS_DB_INDEX')
        self.db_conn = redis.StrictRedis(host=db_host, port=db_port, db=db_index)
        self.item_i = 0

    def close_spider(self, spider):
        self.db_conn.connection_pool.disconnect()

    def process_item(self, item, spider):
        # Redis hash fields are strings; stringify the boolean flag.
        item['availability'] = str(item['availability'])
        self.insert_db(item)
        return item

    def insert_db(self, item):
        # dict() accepts both plain dicts and scrapy Items, and always
        # copying keeps the live item out of the client's hands.
        data = dict(item)
        self.item_i += 1
        # hmset is deprecated in redis-py (removed in 4.x); use hset
        # with the mapping keyword instead.
        self.db_conn.hset('book:%s' % self.item_i, mapping=data)


logger = logging.getLogger("SaveImagePipeline")

class SaveImagePipeline(ImagesPipeline):
    # 用于下载图片的请求
    def get_media_requests(self, item, info):
        yield Request(url = item["img_url"])

    # 判断是否正确下载
    def item_completed(self, results, item, info):
        if not results[0][0]:
            raise DropItem("下载失败")
        # 打印日志
        logger.debug("下载图片成功")
        return item

    # 修改图片名称
    def file_path(self, request, response=None, info=None):
        # 返回图片名称
        return request.url.split("/")[-1]