# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import scrapy
from scrapy.pipelines.images import ImagesPipeline
from gbifSpider.items import GbifspiderItem, GbifspiderImageItem
import os
import requests
from scrapy.pipelines.images import ImagesPipeline
from gbifSpider.settings import DB_SETTINGS, IMAGES_STORE
from bs4 import BeautifulSoup
import hashlib
from scrapy.utils.python import to_bytes
import time
import pymysql
from twisted.enterprise import adbapi
from openpyxl import Workbook, load_workbook

class GbifSpiderPipeline(object):
    """Persist scraped GBIF occurrence metadata.

    Each accepted :class:`GbifspiderItem` is inserted into the MySQL
    ``gbif`` table (through a twisted adbapi connection pool) and appended
    to a local Excel workbook (``data.xlsx``) for offline inspection.
    Duplicate records (same usage_key / img_key / image URL) are skipped.
    """

    # Worksheet header row: order, family, genus, scientific name, creator,
    # publisher, license, references, created, rights holder, identifier,
    # suggested attributes, local image path, usage key, image key.
    EXCEL_HEADER = ['目', '科', '属', '学名', '创建人', '发布者', '记录授权',
                    '参考文献', '已创建', '权利持有人', '标识符', '建议的属性',
                    '图片路径', 'usage_key', 'img_key']

    # Optional item fields copied verbatim into the DB row; a missing field
    # defaults to ''.
    _OPTIONAL_FIELDS = ('creator', 'publisher', 'license', 'created',
                        'rightsHolder', 'identifier', 'scientificName',
                        'country', 'kingdom', 'phylum', 'family', 'genus')

    def __init__(self, db_pool):
        self.db_pool = db_pool

        self.file_name = "data.xlsx"
        # Reuse an existing workbook so earlier rows are kept.
        if os.path.exists(self.file_name):
            self.wb = load_workbook(self.file_name)
            self.ws = self.wb.active
        else:
            self.wb = Workbook()
            self.ws = self.wb.active
            # Write the header only when the workbook is first created.
            # (Previously it was appended on every start-up, producing
            # duplicate header rows in an existing data.xlsx.)
            self.ws.append(self.EXCEL_HEADER)

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline with a MySQL connection pool.

        :param settings: Scrapy settings object (unused; the DB config is
            read from ``DB_SETTINGS['gbif']``).
        :return: pipeline instance holding an adbapi ConnectionPool.
        """
        db_params = DB_SETTINGS['gbif']
        # utf8mb4 so CJK text and other non-BMP characters round-trip.
        db_pool = adbapi.ConnectionPool('pymysql', **db_params, charset='utf8mb4')
        return cls(db_pool)

    def process_item(self, item, spider):
        """Insert a ``GbifspiderItem`` into MySQL and the Excel sheet.

        Items of any other type pass through untouched.
        """
        if not isinstance(item, GbifspiderItem):
            return item

        data = {
            'image': item['image'],
            'usage_key': item['usageKey'],
            'img_key': item['imgKey'],
        }
        if 'year' in item:
            data['year'] = item['year']
        data['create_time'] = int(time.time())
        # Stable id derived from the image URL; matches the sha1-based file
        # name the images pipeline uses for the downloaded file.
        data['img_id'] = hashlib.sha1(to_bytes(data['image'])).hexdigest()

        for field in self._OPTIONAL_FIELDS:
            data[field] = item.get(field, '')
        # ``references`` and ``order`` are MySQL reserved words, hence the
        # back-ticked column names.
        data['`references`'] = item.get('references', '')
        data['`order`'] = item.get('order', '')

        if 'scientificName' in item and 'country' in item:
            data['suggested'] = data['scientificName'] + ' ' + data['country']
        else:
            data['suggested'] = ''

        # Images are stored under a folder named after the scientific name.
        # Use the defaulted value instead of item['scientificName'] so a
        # missing field no longer raises KeyError here.
        folder = data['scientificName']
        image_guid = data['img_id'] + '.jpg'
        data['img_local'] = IMAGES_STORE + '/' + folder + '/' + image_guid

        if not self.gbif_exist(data):
            self.db_pool.runInteraction(self.do_insert, data)

            line = [data['`order`'], data['family'], data['genus'],
                    data['scientificName'], data['creator'], data['publisher'],
                    data['license'], data['`references`'], data['created'],
                    data['rightsHolder'], data['identifier'], data['suggested'],
                    data['img_local'], data['usage_key'], data['img_key']]
            self.ws.append(line)
            # Save after every row so a crash loses at most the current
            # item. (The previous per-item wb.close() was dropped: the
            # workbook keeps being appended to on later items.)
            self.wb.save(self.file_name)
            print("excel数据插入成功")

        return item

    def gbif_exist(self, data):
        """Return True if this image record already exists in MySQL.

        :param data: row dict with 'usage_key', 'img_key' and 'image' set.
        """
        db_params = DB_SETTINGS['gbif']
        # One short-lived connection per check (runs in Scrapy's thread,
        # outside the adbapi pool).
        db = pymysql.connect(host=db_params['host'],
                             user=db_params['user'],
                             password=db_params['password'],
                             database=db_params['db'])
        try:
            cursor = db.cursor(cursor=pymysql.cursors.DictCursor)
            # Parameterized query: the previous %-interpolated SQL was open
            # to injection and broke on quotes inside the image URL.
            sql = ("select id, image, img_id, `img_key`, usage_key from gbif "
                   "where usage_key = %s and img_key = %s and image = %s")
            cursor.execute(sql, (data['usage_key'], data['img_key'], data['image']))
            return len(cursor.fetchall()) > 0
        finally:
            # Always release the connection, even if the query raises.
            db.close()

    @staticmethod
    def do_insert(cursor, item):
        """Insert one row into ``gbif``.

        Runs inside the adbapi interaction, which commits automatically.

        :param cursor: DB-API cursor supplied by the pool.
        :param item: dict of column name (possibly back-ticked) -> value.
        """
        table_name = 'gbif'
        col = ','.join(item.keys())
        # Every value is stringified, matching what the table stored so far.
        value_list = [str(item.get(i, '')) for i in item]
        values_params = '%s, ' * (len(item) - 1) + '%s'
        insert_sql = 'insert into %s (%s) values (%s)' % (table_name, col, values_params)
        try:
            cursor.execute(insert_sql, tuple(value_list))
            print("gbif数据插入成功 => " + str(cursor.lastrowid))
        except Exception as e:
            # Best-effort: log the failing row and keep the crawl running.
            print(value_list)
            print("gbif执行sql异常 => " + str(e))

class GbifImagesPipeline(ImagesPipeline):
    """ImagesPipeline subclass for GBIF occurrence images.

    Stores each image under a folder named after the occurrence's
    scientific name, retries failed downloads through GBIF's cached copy
    on the occurrence page, and records the download state in MySQL.
    """

    def file_path(self, request, response=None, info=None, item=None):
        """Return the storage path for a downloaded image.

        The stock path ('full/<sha1>.jpg') is rewritten so the folder is
        the item's scientific name. Non-GBIF items get the stock path
        (previously this returned None, which breaks storage).
        """
        path = super(GbifImagesPipeline, self).file_path(request, response, info, item=None)
        if isinstance(item, GbifspiderImageItem):
            folder = '%s' % (item['scientificName'])
            return path.replace('full/', folder + '/')
        return path

    def item_completed(self, results, item, info):
        """Record per-image download results in MySQL.

        On a failed download, scrape GBIF's occurrence page for its cached
        copy of the image and save that instead. Returns the item in all
        cases (the old code returned None for non-image items, which drops
        them from the pipeline).
        """
        if not isinstance(item, GbifspiderImageItem):
            return item

        data = {
            'img_id': item['imgId'],
            'image': item['image'],
            'usage_key': item['usageKey'],
            'img_key': item['imgKey'],
            'create_time': int(time.time()),
        }
        for ok, result in results:
            if ok:
                path = result['path']
                url = result['url']
                data['is_download'] = 1
                print(f'Downloaded url is {url}')
                print(f'Downloaded image saved in {path}')
            else:
                # Direct download failed: fall back to the cached copy
                # embedded in GBIF's occurrence page template.
                url = 'https://www.gbif.org/api/template/occurrence/%s?locale=zh' % str(item['imgKey'])
                print('抓取失败，使用缓存图片:' + url)
                data['is_download'] = 0
                # Timeout added (was missing; a hung request stalled the
                # whole pipeline thread).
                response = requests.get(url, timeout=30)
                soup = BeautifulSoup(response.text, 'html.parser')
                for card_figure in soup.select('.card-figure'):
                    dt = card_figure.select_one('.card__content dt[translate="ocurrenceFieldNames.identifier"]')
                    if dt is None:
                        continue
                    # Match the card whose identifier link is our image URL.
                    alink = dt.parent.find('a')
                    if alink['href'] != item['image']:
                        continue
                    imgContainer = card_figure.select_one('a.imgContainer')
                    imgContainerHref = imgContainer['href']
                    imgContainerHref = 'https:' + imgContainerHref.replace('/tools/zoom/simple.html?src=', '')
                    img_response = requests.get(imgContainerHref, timeout=30)
                    print('缓存图片url:' + imgContainerHref)
                    if img_response.status_code == 200:
                        folder = '%s' % (item['scientificName'])
                        image_guid = hashlib.sha1(to_bytes(imgContainerHref)).hexdigest() + '.jpg'
                        # Images dir next to the project package. The old
                        # code concatenated dirname(__file__) + '../../images'
                        # without a separator, which normalised to
                        # <parent-of-package>/images; join(..., '..', 'images')
                        # resolves to the same directory, explicitly.
                        save_dir = os.path.abspath(
                            os.path.join(os.path.dirname(__file__), '..', 'images')) + '/' + folder
                        # makedirs(exist_ok=True): mkdir raised when the
                        # parent was missing or the dir already existed.
                        os.makedirs(save_dir, exist_ok=True)
                        with open(save_dir + '/' + image_guid, 'wb') as file:
                            file.write(img_response.content)
                        data['image'] = imgContainerHref
                        data['is_download'] = 1
                        print(f'Retry Downloaded url is {imgContainerHref}')
                        print(f'Retry Downloaded image saved in {image_guid}')
                    else:
                        # Cached copy unavailable either: mark permanently
                        # failed so it is not retried forever.
                        data['is_download'] = -1
                        data['img_local'] = ''

            data['is_read'] = 1
            data['is_doing'] = 0
            self.do_update_gbif(data)

        return item

    def do_update_gbif(self, item):
        """Update the read/download/doing flags of one gbif row (by img_id).

        :param item: dict with 'is_read', 'is_download', 'is_doing', 'img_id'.
        """
        db_params = DB_SETTINGS['gbif']
        db = pymysql.connect(host=db_params['host'],
                             user=db_params['user'],
                             password=db_params['password'],
                             database=db_params['db'])
        try:
            cursor = db.cursor()
            # Parameterized (old SQL was %-interpolated and injectable).
            update_sql = ("update gbif set is_read = %s, is_download = %s, "
                          "is_doing = %s where img_id = %s")
            try:
                cursor.execute(update_sql, (item['is_read'], item['is_download'],
                                            item['is_doing'], item['img_id']))
                # pymysql does NOT autocommit: without this commit the
                # original update was rolled back when the connection closed.
                db.commit()
                print("gbif数据更新成功 => " + update_sql)
            except Exception as e:
                print("gbif执行sql异常 => " + str(e))
        finally:
            db.close()

    def do_update_gbif_url(self, item):
        """Mark the gbif_url row for this occurrence/image pair as read."""
        db_params = DB_SETTINGS['gbif']
        db = pymysql.connect(host=db_params['host'],
                             user=db_params['user'],
                             password=db_params['password'],
                             database=db_params['db'])
        try:
            cursor = db.cursor()
            update_sql = "update gbif_url set is_read = 1 where usage_key = %s and img_key = %s"
            try:
                cursor.execute(update_sql, (item['usageKey'], item['imgKey']))
                # Explicit commit (see do_update_gbif).
                db.commit()
                print("gbif_url数据更新成功 => " + update_sql)
            except Exception as e:
                print("gbif_url执行sql异常 => " + str(e))
        finally:
            db.close()

    def do_select_img_count(self, item):
        """Return the expected image count recorded in gbif_url.

        Returns 0 when no matching row exists (the old code crashed on
        ``None[0]``).
        """
        db_params = DB_SETTINGS['gbif']
        db = pymysql.connect(host=db_params['host'],
                             user=db_params['user'],
                             password=db_params['password'],
                             database=db_params['db'])
        try:
            cursor = db.cursor()
            sql = "select img_count from gbif_url where usage_key = %s and img_key = %s"
            cursor.execute(sql, (item['usageKey'], item['imgKey']))
            row = cursor.fetchone()
            return row[0] if row else 0
        finally:
            db.close()

    def do_current_img_count(self, item):
        """Return how many images are already stored in gbif for this pair."""
        db_params = DB_SETTINGS['gbif']
        db = pymysql.connect(host=db_params['host'],
                             user=db_params['user'],
                             password=db_params['password'],
                             database=db_params['db'])
        try:
            cursor = db.cursor()
            # count(1) always yields exactly one row.
            sql = "select count(1) as current_img_count from gbif where usage_key = %s and img_key = %s"
            cursor.execute(sql, (item['usageKey'], item['imgKey']))
            return cursor.fetchone()[0]
        finally:
            db.close()

    def get_media_requests(self, item, info):
        """Yield one download request per image item; other items yield none."""
        if isinstance(item, GbifspiderImageItem):
            print(item['image'])
            yield scrapy.Request(item['image'])