import scrapy
from gbifSpider.items import GbifspiderItem
from gbifSpider.settings import DB_SETTINGS
from scrapy import signals
from twisted.internet import reactor

import json
import math
import pymysql
import time
import datetime
from itertools import chain

class GbifSpider(scrapy.Spider):
    """Crawl GBIF occurrence records that carry still images.

    Two crawl modes are selected in :meth:`start_requests`:

    * keyword mode -- resolve each keyword to a taxon ``usageKey`` via the
      omnisearch API, then page through that taxon's occurrences
      (``get_url`` -> ``parse_next`` -> ``parse``);
    * year mode   -- page through all still-image occurrences one year at a
      time (``get_url_year`` -> ``parse_year``).

    Every processed occurrence is recorded in the ``gbif_url`` MySQL table
    so that later runs can skip already-seen (usage_key, img_key) pairs
    (the skip check itself is currently commented out at the call sites).
    """

    name = "gbif"
    allowed_domains = []
    start_urls = ["https://www.gbif.org"]
    # Omnisearch endpoint used to resolve a keyword to a taxon usageKey.
    search_url = "https://www.gbif.org/api/omnisearch?locale=zh&q="
    # Occurrence search by year window: offset, limit, year_from, year_to.
    occurrence_all_url = "https://www.gbif.org/api/occurrence/search?advanced=false&mediaType=stillImage&locale=zh&offset=%d&limit=%d&occurrence_status=present&year=%d,%d"
    # Occurrence search by taxon: taxon_key, offset, limit.
    occurrence_url = "https://www.gbif.org/api/occurrence/search?advanced=false&taxon_key=%d&mediaType=stillImage&locale=zh&offset=%d&limit=%d"
    occurrence_size = 0    # total hit count reported by the API for the current query
    occurrence_count = 0   # occurrences processed so far (progress logging only)
    url_count = 0          # rows written to gbif_url during this run
    gbif_url_count = 1000  # overwritten with the API's total count once known
    page_size = 100        # API page size used for every request

    # Optional media attributes copied verbatim onto detailed items.
    _MEDIA_FIELDS = ('creator', 'publisher', 'license', 'references',
                     'created', 'rightsHolder', 'identifier')
    # Optional taxonomy/location attributes copied from the occurrence result.
    _RESULT_FIELDS = ('scientificName', 'country', 'kingdom', 'phylum',
                      'order', 'family', 'genus')

    # ------------------------------------------------------------------
    # Database helpers
    # ------------------------------------------------------------------

    def _db_connect(self):
        """Open and return a fresh MySQL connection from DB_SETTINGS['gbif']."""
        db_params = DB_SETTINGS['gbif']
        return pymysql.connect(host=db_params['host'],
                               user=db_params['user'],
                               password=db_params['password'],
                               database=db_params['db'])

    def gbif_url_exist(self):
        """Return the CONCAT(usage_key, img_key) keys already stored in gbif_url.

        :return: flat list of key strings, one per previously crawled occurrence.
        """
        db = self._db_connect()
        try:
            cursor = db.cursor()
            cursor.execute('select CONCAT(usage_key, img_key) as `key` from gbif_url;')
            rows = cursor.fetchall()
        finally:
            # Always release the connection, even if the query raises.
            db.close()
        # fetchall() returns 1-tuples; flatten them into a plain list of keys.
        return list(chain.from_iterable(rows))

    def insert_gbif_url(self, item):
        """REPLACE one bookkeeping row into the gbif_url table.

        :param item: dict mapping column name -> value (usage_key, img_key).
        """
        db = self._db_connect()
        cursor = db.cursor()
        columns = ','.join(item.keys())
        placeholders = ', '.join(['%s'] * len(item))
        value_list = [str(item.get(key, '')) for key in item]
        # Column names come from our own dict literals, so interpolating them
        # is safe; the values themselves go through parameter binding.
        insert_sql = 'replace into %s (%s) values (%s)' % ('gbif_url', columns, placeholders)
        try:
            cursor.execute(insert_sql, tuple(value_list))
            # BUG FIX: pymysql disables autocommit by default and this is a
            # plain connection (not twisted adbapi, despite the old comment),
            # so without an explicit commit every row was discarded on close().
            db.commit()
            print("gbif_url数据插入成功 => " + str(cursor.lastrowid))
            self.url_count += 1
        except Exception as e:
            db.rollback()
            print(value_list)
            print(insert_sql)
            print("gbif_url执行sql异常 => " + str(e))
        finally:
            db.close()

    # ------------------------------------------------------------------
    # Request generation
    # ------------------------------------------------------------------

    def start_requests(self):
        """Entry point: pick keyword mode (keywords present) or year mode.

        BUG FIX: the original wrapped this body in a ``while not is_over``
        loop meant to re-crawl until enough rows were gathered, but Scrapy's
        duplicate filter drops the repeated requests, so the loop could only
        spin forever.  A single pass is the effective (and intended) behavior.
        """
        keywords = [
            'Lavandula angustifolia'
        ]

        # self.gbif_url = self.gbif_url_exist()
        if keywords:
            # Keyword mode: resolve each keyword to a taxon first.
            for keyword in keywords:
                yield scrapy.Request(url=self.search_url + keyword, callback=self.get_url)
        else:
            # Year mode: sweep every year up to (excluding) the current one.
            current_year = datetime.date.today().year
            for year in range(1500, current_year):
                url = self.occurrence_all_url % (0, self.page_size, year, year + 1)
                print(url)
                yield scrapy.Request(url=url, callback=self.get_url_year, meta={'year': year})

        print('gbif_url_count %d' % self.gbif_url_count)
        print('self.url_count %d' % self.url_count)

    # ------------------------------------------------------------------
    # Shared item construction
    # ------------------------------------------------------------------

    def _still_images(self, result):
        """Yield the media entries of *result* that are usable still images."""
        for media in result.get('media', []):
            if 'identifier' in media and media.get('type') == 'StillImage':
                yield media

    def _year_items(self, result, year):
        """Yield one item per still image for the year-mode crawl.

        BUG FIX: the original mutated and re-yielded a single item instance,
        so deferred pipelines could observe every yield overwritten with the
        last image URL; a fresh item is now built per image.
        """
        for media in self._still_images(result):
            item = GbifspiderItem()
            item['usageKey'] = result['taxonKey']
            item['imgKey'] = result['key']
            item['year'] = year
            item['image'] = media['identifier']
            yield item

    def _detailed_items(self, result, usage_key):
        """Yield one fully populated item per still image (keyword-mode crawl)."""
        for media in self._still_images(result):
            item = GbifspiderItem()
            item['usageKey'] = usage_key
            item['imgKey'] = result['key']
            item['image'] = media['identifier']
            for field in self._MEDIA_FIELDS:
                if field in media:
                    item[field] = media[field]
            for field in self._RESULT_FIELDS:
                if field in result:
                    item[field] = result[field]
            yield item

    # ------------------------------------------------------------------
    # Year-mode callbacks
    # ------------------------------------------------------------------

    def get_url_year(self, response):
        """Handle the first page of a year query and schedule the rest."""
        json_dic = json.loads(response.text)
        results = json_dic['results']
        self.occurrence_size = json_dic['count']
        self.gbif_url_count = json_dic['count']
        year = response.meta['year']

        if self.occurrence_size > 0:
            for result in results:
                self.occurrence_count += 1
                for item in self._year_items(result, year):
                    yield item
                # Record the occurrence so future runs can skip it.
                self.insert_gbif_url({'usage_key': result['taxonKey'],
                                      'img_key': result['key']})

            print('正在抓取第%d页，第%d条，共%d条' % (1, self.occurrence_count, self.occurrence_size))

            pages = self.calculate_pages(self.occurrence_size, self.page_size)
            for page in range(1, pages):
                url = self.occurrence_all_url % (page * self.page_size, self.page_size, year, year + 1)
                print(url)
                yield scrapy.Request(url=url, callback=self.parse_year,
                                     meta={'page': page, 'year': year})

    def parse_year(self, response):
        """Handle page 2+ of a year query."""
        page = response.meta['page']
        year = response.meta['year']
        results = json.loads(response.text)['results']
        for result in results:
            self.occurrence_count += 1
            print('正在抓取第%d页，第%d条，共%d条' % (page + 1, self.occurrence_count, self.occurrence_size))
            for item in self._year_items(result, year):
                yield item
            self.insert_gbif_url({'usage_key': result['taxonKey'],
                                  'img_key': result['key']})

    # ------------------------------------------------------------------
    # Keyword-mode callbacks
    # ------------------------------------------------------------------

    def get_url(self, response):
        """Resolve an omnisearch response to a taxon and start its crawl.

        BUG FIX: the original iterated ``range(0, len(matches) - 1)`` with an
        immediate break at index > 0, which processed only the first match AND
        processed nothing at all when exactly one match was returned.  The
        intent -- use the best (first) match -- is now explicit and works for
        any non-empty result list.
        """
        matches = json.loads(response.text)['speciesMatches']['results']
        if matches:
            usage_key = matches[0]['usageKey']
            url = self.occurrence_url % (usage_key, 0, self.page_size)
            yield scrapy.Request(url=url, callback=self.parse_next,
                                 meta={'usageKey': usage_key})

    def parse_next(self, response):
        """Handle the first occurrence page of a taxon and schedule the rest."""
        json_dic = json.loads(response.text)
        results = json_dic['results']
        self.occurrence_size = json_dic['count']
        self.gbif_url_count = json_dic['count']
        usage_key = response.meta['usageKey']

        for result in results:
            self.occurrence_count += 1
            for item in self._detailed_items(result, usage_key):
                yield item
            self.insert_gbif_url({'usage_key': usage_key, 'img_key': result['key']})

        print('第一页抓取完成')

        pages = self.calculate_pages(self.occurrence_size, self.page_size)
        for page in range(1, pages):
            url = self.occurrence_url % (usage_key, page * self.page_size, self.page_size)
            print(url)
            yield scrapy.Request(url=url, callback=self.parse,
                                 meta={'page': page, 'usageKey': usage_key})

    def parse(self, response):
        """Handle occurrence page 2+ of a taxon."""
        page = response.meta['page']
        usage_key = response.meta['usageKey']
        results = json.loads(response.text)['results']
        for result in results:
            self.occurrence_count += 1
            print('正在抓取第%d页，第%d条，共%d条' % (page, self.occurrence_count, self.occurrence_size))
            for item in self._detailed_items(result, usage_key):
                yield item
            self.insert_gbif_url({'usage_key': usage_key, 'img_key': result['key']})

    # ------------------------------------------------------------------
    # Utilities
    # ------------------------------------------------------------------

    def calculate_pages(self, total_data_count, data_per_page):
        """Return the number of pages needed to cover *total_data_count* records.

        :param total_data_count: total number of records reported by the API.
        :param data_per_page: page size; must be positive.
        :raises ValueError: if *data_per_page* is not positive.
        """
        if data_per_page <= 0:
            raise ValueError("每页数据条数必须大于0")
        return math.ceil(total_data_count / data_per_page)