from squirrel_bakend.config.basic import SPIDER_MAP
from concurrent.futures import ThreadPoolExecutor, as_completed, Future
from squirrel_bakend.task.scrapy_task import scrapy_task


def start_scrapy_spider(params):
    """Validate the request and launch a scrapy spider task, waiting up to 60s.

    Args:
        params: request dict; reads "web_type", "crawler_type" and
            "serialNumber", and is passed through unchanged to
            ``scrapy_task`` (may carry extra keys such as "spider_config").

    Returns:
        dict: status payload containing at least serialNumber / webType /
        crawlerType, a ``code`` (0 = success, 201 = task timeout/failure,
        202 = unsupported crawler/web combination) and a ``success`` flag,
        merged with whatever dict the spider task itself returns.
    """
    web_type = params.get("web_type", "")
    crawler_type = params.get("crawler_type", "")
    serial_number = params.get("serialNumber", "")

    # Guard clause: reject unsupported combinations early (the error dict
    # returned by crawler_web_check is not used by callers, so discard it).
    check_flag, _ = crawler_web_check(crawler_type, web_type)
    if not check_flag:
        return {'serialNumber': serial_number, 'code': 202, 'message': 'crawler_type或web_type不支持', 'success': False,
                'web_type': web_type, 'crawler_type': crawler_type}

    scrapy_clz_name = f"{web_type}_{crawler_type}"
    result = {'serialNumber': serial_number, 'webType': web_type, 'crawlerType': crawler_type,
              'message': 'success', 'code': 0, 'success': True}

    # A single task needs neither a task list nor as_completed():
    # future.result(timeout=...) gives the same 60-second bound directly.
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(scrapy_task, scrapy_clz_name, params)
        try:
            result.update(future.result(timeout=60))
        except Exception:
            # NOTE(review): any failure — not only TimeoutError — is reported
            # with the "timeout" message; preserved from the original behavior.
            result.update(
                {'serialNumber': serial_number, 'code': 201, 'message': '启动爬虫任务超时', 'success': False,
                 'web_type': web_type, 'crawler_type': crawler_type}
            )
        return result


def crawler_web_check(crawler_type, web_type):
    """Check whether the crawler/web combination is supported by SPIDER_MAP.

    Returns:
        tuple: ``(True, None)`` when supported, otherwise
        ``(False, error_dict)`` describing the unsupported value.
    """
    if crawler_type in SPIDER_MAP:
        if web_type in SPIDER_MAP[crawler_type]['web_type']:
            return True, None
        # crawler type is known, but this web source isn't registered for it
        return False, {'note': f'不支持:{web_type}', 'code': 202, 'success': False}
    # crawler type itself is unknown
    return False, {'note': f'不支持:{crawler_type}', 'code': 202, 'success': False}


if __name__ == "__main__":
    # Ad-hoc manual run: fire a sample "score" crawl against "malasong".
    demo_params = {
        'serialNumber': '11112222',
        'crawler_type': 'score',
        'web_type': 'malasong',
        'spider_config': {'user_name': '杜璐', 'card_num': '511321198612302823'},
    }
    start_scrapy_spider(demo_params)