# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, String, Integer, Text, DateTime
from sqlalchemy.ext.declarative import declarative_base
from scrapy.pipelines.files import FilesPipeline
from scrapy.pipelines.images import ImagesPipeline
from scrapy.utils.python import to_bytes
import hashlib
from sqlalchemy.exc import IntegrityError

Base = declarative_base()


class TechnologyInfo(Base):
    """ORM model for the ``t_technology_info`` MySQL table.

    One row per collected record: provenance fields (where/when/how it was
    collected), timestamps, and the textual content plus attached file paths.
    """
    __tablename__ = 't_technology_info'
    technology_info_id = Column(Integer, primary_key=True)  # surrogate primary key
    data_address = Column(String(1024))   # source address; also used as the Redis de-dup key by the pipeline
    website_name = Column(String(255))    # human-readable name of the source site
    spider_name = Column(String(255))     # name of the spider that collected the record
    data_source_type = Column(String(255))
    data_type = Column(String(255))
    collection_mode = Column(String(255))
    collection_time = Column(DateTime)    # presumably when the record was scraped — confirm with spiders
    publish_time = Column(DateTime)       # presumably the source's publication time — confirm with spiders
    title = Column(Text)
    abstract = Column(Text)
    topic = Column(Text)
    text = Column(Text)                   # main body text
    author = Column(Text)
    attached_file_info = Column(Text)     # '!@@!'-joined storage paths of downloaded images/files


class KyqbCollectionSpiderPipeline:
    """Persist scraped items into the ``t_technology_info`` MySQL table.

    On a successful insert the item's ``data_address`` is added to the
    spider's Redis set (used for de-duplication); rows that violate a
    unique constraint are skipped.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy entry point: build the pipeline from crawler settings.

        Reads the MYSQL_* settings, creates the SQLAlchemy engine/session
        and returns a configured pipeline instance.
        """
        MYSQL_HOST = crawler.settings.get('MYSQL_HOST')
        MYSQL_PORT = crawler.settings.get('MYSQL_PORT')
        MYSQL_DB = crawler.settings.get('MYSQL_DB')
        MYSQL_USER = crawler.settings.get('MYSQL_USER')
        MYSQL_PASSWD = crawler.settings.get('MYSQL_PASSWD')
        engine = create_engine(f'mysql+pymysql://{MYSQL_USER}:{MYSQL_PASSWD}@{MYSQL_HOST}:{MYSQL_PORT}/{MYSQL_DB}')
        Session = sessionmaker(bind=engine)
        pipeline = cls()
        # Bind the session to the INSTANCE. The original assigned to
        # ``cls.db``, turning the session into shared class state.
        pipeline.db = Session()
        return pipeline

    @staticmethod
    def _clean(value):
        """Strip surrounding whitespace; map empty/None values to None."""
        return value.strip() if value else None

    def process_item(self, item, spider):
        """Map ``item`` onto a TechnologyInfo row and commit it.

        Returns the item unchanged so later pipelines can process it.
        """
        # ``images`` may be missing/None when nothing was downloaded for
        # the item — guard against iterating None.
        path = [image["path"] for image in item.get("images") or []]
        technology_info = TechnologyInfo()
        technology_info.data_address = item['data_address']
        technology_info.website_name = item['website_name']
        technology_info.spider_name = item['spider_name']
        technology_info.data_source_type = item['data_source_type']
        technology_info.data_type = item['data_type']
        technology_info.collection_mode = item['collection_mode']
        technology_info.collection_time = item['collection_time']
        technology_info.publish_time = item['publish_time']
        technology_info.title = self._clean(item['title'])
        technology_info.abstract = self._clean(item['abstract'])
        technology_info.topic = self._clean(item['topic'])
        technology_info.text = self._clean(item['text'])
        technology_info.author = self._clean(item['author'])
        technology_info.attached_file_info = '!@@!'.join(path)
        self.db.add(technology_info)
        try:
            self.db.commit()
            # Mark the URL as seen only AFTER a successful insert so
            # failed items can be re-collected later.
            spider.redis.sadd(spider.name, item['data_address'])
        except IntegrityError:
            # Duplicate row — roll back so the session stays usable for
            # the next item (a failed commit leaves it in a pending state).
            self.db.rollback()
            print(f"插入数据重复")
        except Exception as e:
            self.db.rollback()
            print(f"插入数据失败:\n{e}")
        self.db.close()
        return item


class MinioFilesPipeline(FilesPipeline):
    """Files pipeline that stores a download under ``request.meta['file-name']``
    when the spider supplied one, otherwise under the URL's last path segment.
    """

    def file_path(self, request, response=None, info=None, *, item=None):
        """Return the relative storage path for a downloaded file.

        The keyword-only ``item`` parameter matches the modern Scrapy
        ``file_path`` signature (Scrapy passes the source item by keyword);
        sibling NewImagesPipeline already accepts it — without it this
        override raises TypeError on newer Scrapy versions.
        """
        file_name = request.meta.get('file-name')
        if file_name:
            return file_name
        else:
            # Fall back to "<basename><ext>" derived from the request URL.
            info_list = os.path.splitext(request.url)
            media_guid = info_list[0].split('/')[-1]
            media_ext = info_list[1]
            print(f'开始下载文件:{media_guid}{media_ext}')
            return '%s%s' % (media_guid, media_ext)


class NewImagesPipeline(ImagesPipeline):
    """Images pipeline that names each stored image after the SHA-1 digest
    of its request URL, with a fixed ``.jpg`` extension.
    """

    def file_path(self, request, response=None, info=None, *, item=None):
        # A stable content-independent name: same URL always maps to the
        # same file path.
        digest = hashlib.sha1(to_bytes(request.url)).hexdigest()
        return '%s.jpg' % digest
