# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import logging
import os
from datetime import datetime
from pathlib import PurePosixPath
from urllib.parse import urlparse

import scrapy

import json
import codecs
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
from scrapy.pipelines.files import FilesPipeline
from scrapy.utils.project import get_project_settings


# from eolcrawl.database.downloadlink_models import DownloadLinkDB
from eolcrawl.database.roslink_models import RosLinkDB
from eolcrawl.database.rosrepos_models import RosReposDB

from eolcrawl.spiderutils.common import get_str_md5
from eolcrawl.spiderutils.mysqlcom import MySqlUtil

from eolcrawl.spiderutils.log_helper import get_mylogger
# NOTE(review): this rebinds the stdlib module name ``logging`` to the project
# logger returned by get_mylogger, shadowing the ``import logging`` above. Any
# later ``logging.<member>`` in this module resolves against the logger object,
# not the module. Consider renaming to ``logger`` — confirm first that no other
# module imports ``logging`` from here.
logging = get_mylogger("pipelines",level=logging.INFO)


class EolcrawlPipeline:
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform here; hand the item to the next stage as-is.
        return item


class AttachmentsPipeline(FilesPipeline):
    """FilesPipeline that stores attachments under
    ``export_data/<spider>/<date>/affix/`` using the item's declared file name.
    """

    def file_path(self, request, response=None, info=None, *, item=None):
        """Build the storage path for one downloaded attachment.

        Prefers the matching entry of ``item['affix_name']`` (appending the
        URL's extension when the name lacks it); falls back to the URL's last
        path segment so a file name is always available.
        """
        spider_name = self.crawler.spider.name
        url_path = PurePosixPath(urlparse(request.url).path)
        # Fallback keeps ``file_name`` bound even when the item carries no
        # affix metadata (the original code raised NameError in that case).
        file_name = url_path.name
        suffix = url_path.suffix  # file extension, e.g. ".txt"
        try:
            affix_urls = item.get('affix_url') if item else None
            if affix_urls:
                index = affix_urls.index(request.url)
                names = item.get('affix_name')
                if names:
                    name: str = names[index].strip()
                    file_name = name if name.endswith(suffix) else name + suffix
        except (ValueError, IndexError):
            # URL missing from affix_url, or affix_name shorter than affix_url
            # (replaces a stray copy-pasted debug print).
            logging.warning("attachment url not listed in item affix data: %s", request.url)

        return f"export_data/{spider_name}/{datetime.today().date()}/affix/" + file_name

    def get_media_requests(self, item, info):
        """Yield one download request per attachment URL on the item."""
        for url in item.get('affix_url') or []:
            yield scrapy.Request(url)

    def item_completed(self, results, item, info):
        """Record the stored paths of successful downloads on the item.

        ``results`` is a list of ``(success, file_info_or_failure)`` tuples as
        produced by FilesPipeline.
        """
        for ok, info_or_failure in results:
            if not ok:
                # Log failures via the project logger instead of print().
                logging.warning("Failed to download file: %s", info_or_failure)

        file_paths = [x["path"] for ok, x in results if ok]
        if file_paths:
            ItemAdapter(item)["affix_file_paths"] = file_paths

        return item


class JsonWithEncodingPipeline():
    """Append list-spider items as JSON lines to ``<FILES_STORE>/task_exe.json``."""

    def __init__(self):
        settings = get_project_settings()
        store_dir = settings.get("FILES_STORE")
        filepath = os.path.join(store_dir, 'task_exe.json')
        # utf-8 so that ensure_ascii=False output is written verbatim.
        self.file = codecs.open(filepath, 'w', encoding='utf-8')

    def process_item(self, item, spider):
        """Write url/title/create_date for list-spider items; pass item on."""
        if spider.crawler.spider.type == 'listspider':
            data = {
                "url": item.get("url"),
                "title": item.get("title"),
                "create_date": datetime.today().strftime("%Y/%m/%d, %H:%M:%S"),
            }
            self.file.write(json.dumps(data, ensure_ascii=False) + "\n")
        return item

    def close_spider(self, spider):
        # Scrapy calls close_spider on pipelines automatically. The original
        # only defined spider_closed, which is never invoked for a pipeline
        # unless manually wired to the spider_closed signal, so the file
        # handle leaked.
        self.file.close()

    def spider_closed(self, spider):
        # Kept for backward compatibility with any manual signal wiring.
        self.close_spider(spider)

##仅保存新数据到SQLite (save only new records; duplicates are dropped)
# settings.py
#
# ITEM_PIPELINES = {
#     # 'sqlite_demo.pipelines.SqliteDemoPipeline': 300,
#     'sqlite_demo.pipelines.SqliteNoDuplicatesPipeline': 300,
#
# }


class SqliteNoDuplicatesPipeline:
    """Persist crawled data, skipping records already stored in the database.

    Dispatches on ``spider.crawler.spider.type``:

    * ``listspider``   - insert new download links; items whose link is
      already stored are dropped so they are not exported again.
    * ``detailspider`` - insert or update detail content, then mark the
      originating list link as downloaded.
    * ``query_spider`` - update release-code data for existing records and
      drop the item once updated.

    Counters collected here are reported by ``close_spider``.
    """

    def __init__(self):
        # NOTE(review): "cout" is a long-standing typo for "count"; the names
        # are kept so any external readers of these attributes keep working.
        self.total_cout = 0          # list items processed
        self.exist_cout = 0          # list items already in the DB
        self.exist_no_download = 0   # existing links not yet downloaded
        self.exist_download = 0      # existing links already downloaded
        self.insert_cout = 0         # new links inserted
        self.update_cout = 0         # link status updates
        self.detail_update_cout = 0  # detail rows updated
        self.detail_insert_cout = 0  # detail rows inserted

    def process_item(self, item, spider):
        """Route the item to the handler matching the spider type.

        Returns the item unchanged, or raises DropItem for duplicates that
        must not reach the exporters. (The original code constructed
        DropItem without raising it, so duplicates were exported anyway.)
        """
        if not hasattr(spider.crawler.spider, 'type'):
            spider.logger.warning("the spider has no 'type' attribute")
            return item

        try:
            spider_type = spider.crawler.spider.type
            if spider_type == 'listspider':
                self._process_list_item(item, spider)
            elif spider_type == 'detailspider':
                self._process_detail_item(item, spider)
            elif spider_type == 'query_spider':
                self._process_query_item(item, spider)
            else:
                spider.logger.error(f"spider name: {spider.name} is not listspider or detailspider")
        except DropItem:
            # Deliberate drop — propagate without logging it as a SQL failure.
            raise
        except Exception as e:
            spider.logger.exception(f"execute sql failed,{e}")
            raise e
        return item

    def _process_list_item(self, item, spider):
        """Insert a new download link, or drop the item if already stored."""
        self.total_cout += 1
        downloadlink_db = RosLinkDB()
        link_id = downloadlink_db.build_id(item)
        result = downloadlink_db.get_download_link(link_id, spider.name)

        if result:
            spider.logger.debug(f"Item already in database: url: {item['url']} ,spider_name: {spider.name}")
            self.exist_cout += 1
            # status == 1 means the link was already downloaded.
            result_status = result[0].get("status") if len(result) > 0 else 0
            if result_status == 1:
                self.exist_download += 1
            else:
                self.exist_no_download += 1
            # Bug fix: the original built DropItem without ``raise``, so
            # duplicate links were still exported.
            raise DropItem(f"The file has already been downloaded, url: {item['url']} ,spider_name: {spider.name}.")

        download_link = downloadlink_db.convert_item_to_downloadlink(item, spider.name)
        ret = downloadlink_db.insert_download_link(download_link)
        self.insert_cout += 1
        spider.logger.debug(f"insert download link: {item['url']} ,spider_name: {spider.name}, ret:{ret}")

    def _process_detail_item(self, item, spider):
        """Insert or update detail content, then mark the list link downloaded.

        DB errors are logged and swallowed (best effort), matching the
        original behavior.
        """
        data_to_insert = None
        try:
            detail_db = RosReposDB()
            # Look up by md5(name)/spider_name to decide insert vs update.
            existing_record = detail_db.get_detail_content(get_str_md5(item["name"]), spider.name)
            data_to_insert = detail_db.build_detail_instert_data(item, spider.name)
            if existing_record:
                ret = detail_db.save_detail_content(data_to_insert)
                spider.logger.warning(
                    f"更新记录 - title_id: {data_to_insert.title_id}, "
                    f"spider_name: {data_to_insert.spider_name}, "
                    f"原标题: {data_to_insert.title}, "
                    f"ret:{ret}"
                )
                self.detail_update_cout += 1
            else:
                ret = detail_db.insert_detail_content(data_to_insert)
                spider.logger.info(
                    f"插入新记录 - title_id: {data_to_insert.title_id}, "
                    f"spider_name: {data_to_insert.spider_name}, "
                    f"ret:{ret}"
                )
                self.detail_insert_cout += 1

            if spider.name != 'xinhuaskl':
                # Mark the originating list link as downloaded (status=1).
                downloadlink_db = RosLinkDB()
                ret = downloadlink_db.update_status(data_to_insert.href, spider.listfile_name, 1)
                self.update_cout += 1
                spider.logger.debug(f"更新list状态为已下载: {data_to_insert.href} ,spider_name: {spider.listfile_name}, ret:{ret}")
        except Exception as e:
            # data_to_insert may be unset when the lookup itself failed; the
            # original dereferenced it here and raised NameError.
            spider.logger.exception(
                f"数据操作失败 - title_id: {getattr(data_to_insert, 'title_id', None)}, "
                f"spider_name: {spider.name}, "
                f"错误信息: {str(e)}"
            )

    def _process_query_item(self, item, spider):
        """Update release-code data for an existing record; drop the item
        afterwards so it is not re-exported. Missing records are only logged.
        """
        data_to_insert = None
        try:
            detail_db = RosReposDB()
            existing_record = detail_db.get_detail_content(get_str_md5(item["name"]), spider.name)
            data_to_insert = detail_db.build_realsecode_instert_data(item, spider.name)
            if existing_record is not None:
                condition = (RosRepos._id == get_str_md5(item["name"]))
                ret = detail_db.update_detail_contentByCondition(data_to_insert, condition)
                spider.logger.warning(
                    f"更新记录 - _id: {data_to_insert._id}, "
                    f"spider_name: {spider.name}, "
                    f"name: {item['name']},"
                    f"ret:{ret}"
                )
                self.detail_update_cout += 1
                item['status'] = 1
                # Bug fix: original built DropItem without ``raise``.
                raise DropItem(f"insert to db not save to file, url: {item['url']} ,spider_name: {spider.name}.")

            spider.logger.info(
                f"没有找到记录 - _id: {data_to_insert._id}, "
                f"spider_name: {spider.name}, "
                f"name: {item['name']},"
            )
            self.detail_insert_cout += 1
        except DropItem:
            # Let the intentional drop propagate past the best-effort handler.
            raise
        except Exception as e:
            spider.logger.exception(
                f"数据操作失败 - title_id: {getattr(data_to_insert, 'title_id', None)}, "
                f"spider_name: {spider.name}, "
                f"错误信息: {str(e)}"
            )

    def close_spider(self, spider):
        """Emit a summary of all counters when the spider finishes."""
        spider.logger.warning(
            f"spider name: {spider.name}\n"
            f"数据统计:\n"
            f"总数量: {self.total_cout}\n"
            f"已存在: {self.exist_cout}\n"
            f"新插入: {self.insert_cout}\n"
            f"状态更新: {self.update_cout}\n"
            f"已下载: {self.exist_download}\n"
            f"未下载: {self.exist_no_download}\n"
            f"详情表插入: {self.detail_insert_cout}\n"
            f"详情表更新: {self.detail_update_cout}"
        )


# pipelines.py
import mysql.connector
from eolcrawl.database.spider_db_models import RosRepos

# # settings.py
#
# ITEM_PIPELINES = {
# #    'mysql_demo.pipelines.MysqlDemoPipeline': 300,
#    'mysql_demo.pipelines.MySQLNoDuplicatesPipeline': 300,
# }
##
##pip install mysql-connector-python
###


class MySQLNoDuplicatesPipeline:
    """Demo pipeline: store quotes in MySQL, skipping duplicate content."""

    def __init__(self):
        # NOTE(review): credentials are hard-coded; move them to settings or
        # environment variables before any real deployment.
        self.conn = mysql.connector.connect(
            host='localhost',
            user='root',
            password='Eily1990',
            database='quotes'
        )

        ## Create cursor, used to execute commands
        self.cur = self.conn.cursor()

        ## Create quotes table if none exists
        self.cur.execute("""
        CREATE TABLE IF NOT EXISTS quotes(
            id int NOT NULL auto_increment, 
            content text,
            tags text,
            author VARCHAR(255),
            PRIMARY KEY (id)
        )
        """)

    def process_item(self, item, spider):
        """Insert the quote unless identical content is already stored."""
        ## Check to see if text is already in database
        self.cur.execute("select * from quotes where content = %s", (item['text'],))
        result = self.cur.fetchone()

        ## If it is in DB, create log message
        if result:
            # logger.warn() is deprecated; warning() is the supported name.
            spider.logger.warning("Item already in database: %s" % item['text'])
        ## If text isn't in the DB, insert the scraped item
        else:
            self.cur.execute(""" insert into quotes (content, tags, author) values (%s,%s,%s)""", (
                item["text"],
                str(item["tags"]),
                item["author"]
            ))
            # Bug fix: the connection attribute is ``conn``; the original
            # called ``self.connection.commit()`` and raised AttributeError
            # on every insert.
            self.conn.commit()
        return item

    def close_spider(self, spider):
        """Close cursor and connection when the spider finishes."""
        self.cur.close()
        self.conn.close()


class MySqlPipeline(object):
    """Store torrent items in MySQL via the project connection pool,
    skipping rows whose ``torrent_url`` already exists."""

    pool = None

    def __init__(self):
        pass

    # Acquire the pooled connection when the spider starts.
    def open_spider(self, spider):
        self.pool = MySqlUtil()

    def process_item(self, item, spider):
        """Insert the item unless its torrent_url is already stored.

        Always returns the item so downstream pipelines and exporters
        receive it — the original returned None on every path, starving
        later stages (Scrapy's pipeline contract requires returning the
        item or raising DropItem).
        """
        try:
            # Skip the insert when the URL is already in the table.
            sql_select = """select count(1) from torrent_ye where torrent_url = %(torrent_url)s"""
            params_select = {'torrent_url': item['torrent_url']}
            if self.pool.get_count(sql_select, params_select) > 0:
                spider.logger.info('记录已经存在:[%s][%s]', item['torrent_title'], item['torrent_url'])
                return item

            sql_insert = """insert into torrent_ye(torrent_title, torrent_name, torrent_director,
            torrent_actor, torrent_language, torrent_type, torrent_region, torrent_update_time,
            torrent_status, torrent_show_time, torrent_introduction, torrent_url) values
             (%(torrent_title)s,%(torrent_name)s,%(torrent_director)s,%(torrent_actor)s,%(torrent_language)s,
             %(torrent_type)s,%(torrent_region)s,%(torrent_update_time)s,%(torrent_status)s,%(torrent_show_time)s,%(torrent_introduction)s,%(torrent_url)s)"""

            # All parameter names match the item field names one-to-one.
            fields = ('torrent_title', 'torrent_name', 'torrent_director', 'torrent_actor',
                      'torrent_language', 'torrent_type', 'torrent_region', 'torrent_update_time',
                      'torrent_status', 'torrent_show_time', 'torrent_introduction', 'torrent_url')
            params = {field: item[field] for field in fields}

            self.pool.insert_one(sql_insert, params)
            self.pool.end("commit")
        except Exception as e:
            # Best effort: log and roll back, but keep the item flowing.
            spider.logger.error('发生异常:[%s]', e)
            self.pool.end("rollback")
        return item

    # Pool cleanup is handled inside MySqlUtil; nothing to release here.
    def close_spider(self, spider):
        pass
#
# 在settings.py中的配置：
#
# 1 FEED_FORMAT = 'json' # 输出格式
# 2 FEED_EXPORTERS_BASE = {
# 3     'json': 'scrapy.exporters.JsonItemExporter',
# 4     'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',
# 5 }

#
# class MyCustomPipeline(object):
#     def __init__(self):
#         self.files = {}
#
#     @classmethod
#     def from_crawler(cls, crawler): # 生成pipeline实例的方法
#          pipeline = cls()
#          crawler.signals.connect(pipeline.spider_opened, signals.spider_opened) # 将spider_opened连接到信号上，当spider打开时执行spider_opened方法
#          crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
#          return pipeline
#     def spider_opened(self, spider): #
#         file = open('%s_ip.json' % spider.name, 'w+b') # 生成文件描述符
#         self.files[spider] = file # 保存描述符的引用
#         self.exporter = JsonLinesItemExporter(file) # 实例化一个Exporter类
#         self.exporter.start_exporting() # 开始输出
#
#     def spider_closed(self,spider):
#         self.exporter.finish_exporting() # 结束输出
#         #print('*'*50)
#         file = self.files.pop(spider)
#         #print(file.name)
#         file.close()
#
#     def process_item(self, item, spider):
#         self.exporter.export_item(item) # 正式输出
#         return item
