# coding=utf-8
import ast
import logging
import re
import sys

from venusdemilo import title_processor, image_processor, item_emitter, item_dumper, settings, utils

# Python 2 only: site.py removes sys.setdefaultencoding at startup, and
# reload(sys) re-exposes it so the process-wide default codec can be forced
# to UTF-8 (avoids implicit-ASCII UnicodeDecodeError on non-ASCII item text).
# NOTE(review): this is a known-fragile global hack and does not exist on
# Python 3 — remove it if the project is ever ported.
reload(sys)
sys.setdefaultencoding('utf-8')


def split_processor_clspath(processor_desc):
    """Split a processor descriptor into a class path and a parameter dict.

    A descriptor is either a bare dotted class path, e.g. ``"pkg.mod.Cls"``,
    or a class path followed by ``->`` and a Python dict literal of
    constructor keyword arguments, e.g. ``"pkg.mod.Cls->{'size': 10}"``.

    :param processor_desc: descriptor string from settings.PROCESSOR_MAPPING
    :returns: tuple ``(class_path, parameter_dict_or_None)``
    :raises ValueError: if the text after ``->`` is not a valid Python literal
    """
    # partition (not split) so a '->' inside the parameter part survives.
    clspath, sep, param_text = processor_desc.partition('->')
    if not sep:
        return processor_desc, None
    # ast.literal_eval instead of eval: parameters are plain literals, and
    # literal_eval cannot execute arbitrary code from the config string.
    return clspath, ast.literal_eval(param_text)


if __name__ == "__main__":
    formatter = logging.Formatter(fmt='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                                  datefmt='%a, %d %b %Y %H:%M:%S')
    logger = logging.getLogger()
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    logger.addHandler(stdout_handler)
    file_handler = logging.FileHandler(filename="image_crawler.log")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    logger.setLevel(logging.DEBUG)

    logging.info('started...')
    # item_emitting.db_db = "localhost"
    item_emitter.query_string = 'SELECT id, title, content, spider, category, tag, url, referer, issue_date, source, createDtTm FROM scrapydb.t_sinahealth_dump'
    # item_emitter.query_string = 'SELECT id, title, content, spider, category, tag, url, referer, issue_date, source, createDtTm FROM scrapydb.t_sinahealth_dump limit 5'
    item_list = item_emitter.emit(None)

    logging.info("total records %s..." % len(item_list))
    index = 0

    for item in item_list:
        logging.info("processing #%d item, title is %s..." % (index, item['title']))
        processor_list = utils.match_kv(item['url'], settings.PROCESSOR_MAPPING)
        if processor_list:
            for processor_desc in processor_list:
                processor_clspath, processor_parameter_dict = split_processor_clspath(processor_desc)

                if processor_parameter_dict:
                    processor = utils.load_class(processor_clspath)(**processor_parameter_dict)
                else:
                    processor = utils.load_class(processor_clspath)()
                item = processor.processing(item)
        else:
            logging.warn("no configured processor list found to item %s..." % item)

        # item = title_processor.processing(item)
        # item = image_processor.processing(item)

        dumper_list = utils.match_kv(item['url'], settings.DUMPER_MAPPING)
        if dumper_list:
            for dumper_clspath in dumper_list:
                dumper = utils.load_class(dumper_clspath)()
                dumper.dump(item)

        index += 1
    # url = 'http://epaper.21jingji.com/html/2017-01/23/content_55283.htm'
    # html = download_page(url)
    # get_image(html)
