# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from collections.abc import Iterable
from typing import Any

import scrapy
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from pymysql.cursors import DictCursor
from scrapy import Request
from scrapy.http import Response
from scrapy.pipelines.images import ImagesPipeline
from scrapy.pipelines.media import MediaPipeline
from twisted.enterprise import adbapi


class Day15Pipeline:
    """Default no-op pipeline: forwards every item unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; pass the item to the next pipeline stage.
        return item

class ImageDownloadPipeline(ImagesPipeline):
    """
    Image storage pipeline built on Scrapy's ImagesPipeline.

    Like FilesPipeline, the framework automatically builds the requests,
    hands them to the scheduler, then downloads and stores the files.
    We only need to override get_media_requests / file_path and enable
    the pipeline (plus IMAGES_STORE) in settings.
    """

    def get_media_requests(
        self, item: Any, info: MediaPipeline.SpiderInfo
    ) -> Iterable[Request]:
        """Yield one cover-image request per item, for the movie spiders only."""
        if info.spider.name in ['ssr1_movies_c', 'ssr1_movies']:
            # Carry the item in meta so file_path can derive the file name.
            yield scrapy.Request(url=item['cover'], meta={'item': item})

    def file_path(
        self,
        request: Request,
        response: Response | None = None,
        info: MediaPipeline.SpiderInfo | None = None,
        *,
        item: Any = None,
    ) -> str:
        """Name the stored image after the first space-separated word of the title."""
        return request.meta['item']['title'].split(' ')[0] + '.png'

"""
异步将数据存储到mysql中
1、在setting中配置mysql
2、与数据库建立链接  用from_crawler(cls, crawler)方法  cls为当前类 crawler为爬虫项目对象
"""

class MoviesMysqlPipeline:
    """Asynchronously persist movie items to MySQL via a Twisted adbapi pool."""

    # Reads the MySQL connection settings from settings.py and builds the pool.
    @classmethod
    def from_crawler(cls, crawler):
        db_params = dict(
            host=crawler.settings.get('MYSQL_HOST'),
            port=crawler.settings.get('MYSQL_PORT'),
            user=crawler.settings.get('MYSQL_USER'),
            password=crawler.settings.get('MYSQL_PASSWORD'),
            database=crawler.settings.get('MYSQL_DATABASE'),
            charset=crawler.settings.get('MYSQL_CHARSET'),
            use_unicode=True,
            cursorclass=DictCursor,
        )
        # adbapi.ConnectionPool('pymysql', **db_params) creates an async pool
        # that runs blocking pymysql calls in worker threads, so inserts do
        # not block the Twisted reactor.
        return cls(adbapi.ConnectionPool('pymysql', **db_params))

    def __init__(self, dbpool):
        """
        :param dbpool: connection pool produced by from_crawler
        """
        self.dbpool = dbpool

    def process_item(self, item, spider):
        """Schedule an asynchronous insert for items from the ssr1 movie spiders."""
        if spider.name in ['ssr1_movies_c', 'ssr1_movies']:
            deferred = self.dbpool.runInteraction(self.movies_insert, item)
            # Report (rather than silently drop) any database failure.
            deferred.addErrback(self.error_except, item, spider)
        return item

    def movies_insert(self, cursor, item):
        """Insert one movie row; runs inside an adbapi worker thread."""
        cursor.execute(
            'INSERT INTO `ssr1_movie` VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)',
            (
                item['id'].strip(),
                item['title'].strip(),
                item['category'].strip(),
                item['addr'].strip(),
                item['timer'].strip(),
                item['date'],
                item['drama'].strip(),
                item['score'].strip(),
                item['cover'].strip(),
            )
        )

    def error_except(self, failure, item, spider):
        """Errback: log the failed insert through the spider's logger (lazy %-args)."""
        spider.logger.error('MySQL insert failed: %s item=%s', failure, item)