# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import json
import os
import re
import urllib.request

import pymysql

from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem


class DangdangPipeline:
    """Write every scraped item to ``book.json`` as JSON Lines (one object per line).

    Lifecycle hooks: ``open_spider`` runs once before crawling starts,
    ``process_item`` once per item, ``close_spider`` once after the crawl ends.
    """

    def open_spider(self, spider):
        """Open the output file once, before the first item arrives."""
        self.fp = open('book.json', 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Serialize *item* as UTF-8 JSON and append it on its own line.

        The trailing newline is required: without it consecutive objects are
        concatenated into a single blob (``{...}{...}``) that no JSON parser
        can read back.  One-object-per-line is the JSON Lines convention.
        """
        item_dict = dict(item)
        self.fp.write(json.dumps(item_dict, ensure_ascii=False) + '\n')
        return item

    def close_spider(self, spider):
        """Close the output file after the spider finishes."""
        self.fp.close()



# Multiple pipelines can be enabled at the same time:
# 1. Define the pipeline class here.
# 2. Register it in ITEM_PIPELINES in settings.py.
class DangdangDownloadPipeline:
    """Download each item's cover image into ``./books/``.

    Enable alongside the JSON pipeline via ITEM_PIPELINES in settings.
    """

    @staticmethod
    def _safe_name(name):
        """Return *name* reduced to a valid, short filename component.

        Path separators, shell-hostile characters and whitespace are replaced
        with ``_`` so the result can never escape ``./books/`` or produce an
        invalid path; the original code's 6-character truncation is kept.
        """
        return re.sub(r'[\\/:*?"<>|\s]', '_', name or 'unnamed')[:6]

    def process_item(self, item, spider):
        """Fetch the protocol-relative image URL and save it under ``./books/``.

        Items without an image URL are passed through untouched — previously
        ``"https:" + None`` raised TypeError and killed the item.
        """
        img = item.get('img')
        if not img:
            spider.logger.warning("Item has no 'img' URL; skipping image download")
            return item

        url = "https:" + img
        name_ = self._safe_name(item.get('name'))
        filename = './books/' + name_ + '.jpg'
        # Create the target directory lazily on first use.
        os.makedirs(os.path.dirname(filename), exist_ok=True)

        # Best-effort download: log failures and keep the item flowing.
        try:
            urllib.request.urlretrieve(url, filename)
            spider.logger.info(f"图片：{name_} 下载成功")
        except Exception as e:
            spider.logger.error(f"Failed to download image: {url}. Error: {e}")
        return item


class DangdangToMysqlPipeline:
    """Persist scraped book items into a MySQL table via pymysql.

    Connection parameters are read from the crawler settings in
    ``from_crawler``; the connection is opened in ``open_spider`` and
    released in ``close_spider``.
    """

    def __init__(self, host, port, user, password, database, charset, table):
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        self.charset = charset
        self.table = table

    @classmethod
    def from_crawler(cls, crawler):
        """Alternate constructor wiring the Scrapy settings into the pipeline."""
        settings = crawler.settings
        return cls(
            host=settings.get('MYSQL_HOST'),
            port=settings.get('MYSQL_PORT'),
            user=settings.get('MYSQL_USER'),
            password=settings.get('MYSQL_PASSWORD'),
            database=settings.get('MYSQL_DATABASE'),
            charset=settings.get('DB_CHARSET'),
            table=settings.get('MYSQL_TABLE'),
        )

    @staticmethod
    def _parse_price(price):
        """Convert a '¥12.34'-style string to float; return 0.0 otherwise.

        Unlike direct ``adapter['price']`` indexing, this never raises when
        the field is missing, empty, or malformed.
        """
        if isinstance(price, str) and price.startswith('¥'):
            try:
                return float(price[1:])
            except ValueError:
                return 0.0
        return 0.0

    def open_spider(self, spider):
        """Connect to MySQL; any failure aborts the crawl with the real error."""
        # Settings may deliver the port as a string; pymysql requires an int.
        port = int(self.port) if self.port is not None else 3306
        try:
            self.connection = pymysql.connect(
                host=self.host,
                port=port,
                user=self.user,
                password=self.password,
                database=self.database,
                charset=self.charset,
            )
            self.cursor = self.connection.cursor()
            spider.logger.info("成功连接到数据库")
        except Exception as e:
            spider.logger.error(f"数据库连接失败: {e}")
            # Re-raise the original error: DropItem is only meaningful inside
            # process_item, not during spider startup.
            raise

    def process_item(self, item, spider):
        """Insert one item; raises DropItem when the insert fails."""
        adapter = ItemAdapter(item)
        # The table name cannot be a bound parameter, so restrict it to a
        # plain SQL identifier before interpolating it into the statement.
        if not re.fullmatch(r'\w+', self.table or ''):
            raise DropItem(f"Invalid table name: {self.table!r}")
        sql = (
            f"INSERT INTO {self.table} (name, img, price, author, intro, publisher) "
            "VALUES (%s, %s, %s, %s, %s, %s)"
        )
        data = (
            adapter.get('name'),
            'https:' + (adapter.get('img') or ''),
            self._parse_price(adapter.get('price')),
            adapter.get('author'),
            adapter.get('intro'),
            adapter.get('publisher'),
        )

        try:
            self.cursor.execute(sql, data)
            self.connection.commit()
            spider.logger.info(f"数据成功插入数据库: {adapter['name']}")
        except Exception as e:
            # Roll back the failed transaction and drop only this item.
            self.connection.rollback()
            spider.logger.error(f"Failed to insert item into database: {e}")
            raise DropItem(f"Failed to insert item into database: {e}")

        return item

    def close_spider(self, spider):
        """Release cursor and connection if they were ever created.

        Guarded with getattr: if open_spider failed, neither attribute
        exists and the bare close calls would raise AttributeError.
        """
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            cursor.close()
        connection = getattr(self, 'connection', None)
        if connection is not None:
            connection.close()