# -*- coding: utf-8 -*-
from Scrapy import Scrapy
from DATABASE import Database
from Download import Download
from Kits import Kits
import argparse
import time
import Constant
import logging
import logging.config

def Logger_creator():
    """Apply the project's logging configuration and return the 'print' logger.

    Reads the dict-style config from ``Constant.LOG_CONGIF``.
    NOTE(review): ``LOG_CONGIF`` looks like a typo for ``LOG_CONFIG``, but the
    name must match whatever the Constant module actually defines — confirm
    there before renaming.
    """
    logging.config.dictConfig(Constant.LOG_CONGIF)
    return logging.getLogger('print')

def scrapy_modual(singleurl):
    """Crawl all posts listed on one gallery page and insert them into the database.

    Uses the module-level ``spider``, ``db``, ``logger`` and ``artistName``
    globals that ``__main__`` sets up.

    :param singleurl: URL of a single gallery listing page to scrape.
    """
    spider.get_post_url(singleurl)
    data, error = spider.multi_crawler()
    logger.debug('Received urls: \n' + str(data))
    db.databaseinsert(data)
    logger.info('Data(' + artistName + ') inserted completed.')

    if error:
        # Retry once, crawling only the URLs that failed on the first pass.
        logger.warning('Connection issues, now rescrapying...')
        redata, error = spider.multi_crawler(error)
        db.databaseinsert(redata)
        if error:
            # Bug fix: failures surviving the retry used to be dropped silently;
            # surface them so the operator knows the gallery may be incomplete.
            logger.warning(str(len(error)) + ' urls still failed after retry.')

def download_modual():
    """Download every pending artwork recorded in the database, then mark it done.

    Uses the module-level ``spider``, ``db``, ``Down``, ``logger`` and
    ``artistName`` globals that ``__main__`` sets up.
    """
    logger.info('Downloading(' + artistName + ') according to database...')
    # Pull the pending records for this tag, download them, then flag the
    # successfully fetched ones as downloaded in the database.
    finished = Down.multi_download(db.databaseoutput(spider.tag))
    db.databasedownloaded(finished)
    logger.info('Download(' + artistName +') completed')

def update_modual():
    """Incrementally scrape new artworks until the database catches up.

    Crawls page by page; after each page, counts how many stored timestamps
    are newer than the newest timestamp known before this run.  A gallery
    page holds 72 artworks, so a count below 72 means the whole update fits
    on the pages crawled so far and we can stop early.

    Uses the module-level ``spider``, ``db``, ``logger`` and ``artistName``
    globals that ``__main__`` sets up.
    """
    logger.info('Updating(' + artistName + ') artworks information into database...')
    # Snapshot the newest timestamp known BEFORE this update run; everything
    # strictly newer than this counts as "updated".
    (maxUpdateDate, result) = db.databaseupdate(artistName)
    spider.page_check()
    for singleurl in spider.page_list:
        spider.get_post_url(singleurl)
        data, error = spider.multi_crawler()
        db.databaseinsert(data)

        # Usually one update brings fewer than 72 new artworks (one full page),
        # so in most runs the first page's timestamps already settle it.
        (_, result) = db.databaseupdate(artistName)
        updateCounter = sum(1 for stamp in result if stamp > maxUpdateDate)
        if updateCounter < 72:
            logger.info(str(updateCounter) + ' artworks are Updated!')
            return

    # Extreme case: the artist posted 72+ new artworks since the last run (or
    # update mode was used as a full scrape), so every page was crawled.
    # Bug fix: the original compared with '<' here, which counted the OLD
    # artworks instead of the updated ones; '>' matches the loop above.
    (_, result) = db.databaseupdate(artistName)
    updateCounter = sum(1 for stamp in result if stamp > maxUpdateDate)
    logger.info(str(updateCounter) + ' artworks are Updated!')

if __name__ == '__main__':
    logger = Logger_creator()

    # Command-line interface: each flag selects an independent mode; several
    # flags may be combined in one invocation.
    parser = argparse.ArgumentParser(prog = 'Furaffinity Gallery Downloader',
                                    description = Constant.TEXT)
    parser.add_argument('-s','--scrapy',action = 'store_true',help = 'Scrape only')
    parser.add_argument('-d','--download',action = 'store_true',help = 'Download from database')
    parser.add_argument('-m','--mix',action = 'store_true',help = 'Scrape and download，not recommended')
    parser.add_argument('-c','--check',action = 'store_true',help = 'Check how many pages')
    parser.add_argument('-u','--update',action = 'store_true',help = 'Update Gallery')

    args = parser.parse_args()

    # Module-level collaborators shared by the *_modual() functions above.
    spider = Scrapy()
    db     = Database()
    Down   = Download()
    db.databaseCreate()
    # Bug fix: the instance used to be bound to the name 'Kits', shadowing the
    # imported class itself; a lowercase name keeps the class accessible.
    kits = Kits()
    artistName = kits.get_artist_name()

    if args.check:
        logger.info('Page Check(' + artistName + ') processing...')
        pagenum = spider.page_check()
        # Bug fix: str() guards against page_check() returning an int, which
        # would make the '+' concatenation raise TypeError.
        logger.info(str(pagenum) + ' pages can be found currently.')

    if args.scrapy:
        logger.info('***Scrapy(' + artistName + ') modual starting...***')
        time.sleep(1)
        logger.info('Page Check(' + artistName + ') processing...')
        spider.page_check()
        logger.info('Posted urls(' + artistName + ') retrieving...')
        for singleurl in spider.page_list:
            scrapy_modual(singleurl)
        logger.info('***Scrapy(' + artistName + ') completed***')

    if args.download:
        logger.info('***Download(' + artistName + ') modual starting...***')
        download_modual()
        logger.info('***Download(' + artistName + ') completed***')

    if args.mix:
        # Scrape-then-download per page; slower and not recommended (see help).
        logger.info('***Scrapy and Download(' + artistName + ') modual starting...***')
        logger.info('Page Check(' + artistName + ') processing...')
        spider.page_check()
        for singleurl in spider.page_list:
            logger.info('Posted urls(' + artistName + ') retrieving...')
            scrapy_modual(singleurl)

            download_modual()

    if args.update:
        update_modual()
