import itertools
import json
from framework.common.pt_logger import logger
from framework.core.crawler_task import Task
from framework.core.sqs_task_pool import SQSTaskPool
from framework.middlewares.proxy_scraperapi_middleware import ProxyScraperApiMiddleware
from framework.middlewares.random_header_middleware import RandomHeaderMiddleware
from framework.middlewares.random_sleep_middleware import RandomSleepMiddleware
from framework.output.s3_output import S3Output
from framework.output.sqs_output import SQSOutput
from framework.request import Request
from framework.retryable_request import RetryableRequest
from framework.serverless_spider import ServerlessSpider
from jobs.amazon.spiders.keywords_spider.keywords_parser import KeywordsParser
from jobs.amazon.util import sqs_util

# Name of this spider; interpolated into its serverless SQS queue name.
spider_name = "keywords"
# batch_id = "20210801"
# Destination S3 bucket for crawl output.
s3_bucket = "v2-data-production"


def setup_spider(task_pool, batch_id, task_type):
    """Build a ServerlessSpider wired with middlewares, parsers and outputs.

    Args:
        task_pool: pool the spider pulls tasks from.
        batch_id: crawl batch identifier; becomes the S3 folder suffix.
        task_type: unused here; kept for interface compatibility.

    Returns:
        A configured ServerlessSpider instance.
    """
    # NOTE(original author JimmyMo): a serverless spider does not need to
    # control RandomSleep itself — the lambda runtime's own capabilities
    # can provide that.
    request = (
        RetryableRequest(Request())
        .middleware(RandomHeaderMiddleware())
        .middleware(ProxyScraperApiMiddleware())
        .middleware(RandomSleepMiddleware())
    )

    def output_prefix(current_task, result):
        # Partition S3 output by the task-type ("tt") metadata field.
        return current_task.metadata["tt"] + "/file"

    def leaf_notification(current_task, result):
        # Body of the message sent to the monitor queue for leaf results.
        return json.dumps({"batchId": result.metadata.get("batchId")})

    def leaf_condition(current_task, result):
        # A result counts as a leaf node when it came from the last page.
        return result.metadata.get("tp") == "last_page"

    monitor_queue_name = "pt-spider-serverless-monitor-queue"

    outputs = [
        S3Output(
            s3_bucket=s3_bucket,
            s3_folder="%s/%s" % ("SpiderOutput/keywords", batch_id),
            get_prefix_func=output_prefix,
            compress=True,
        ),
        SQSOutput(
            queue_name=monitor_queue_name,
            get_queue_message=leaf_notification,
            condition=leaf_condition,
        ),
    ]
    parsers_map = {"default": KeywordsParser()}
    return ServerlessSpider(request, task_pool, parsers_map, outputs)


def setup_task_pool(name):
    """Return an SQSTaskPool backed by this spider's serverless SQS queue.

    The queue is created on demand so the first run of a new spider does
    not fail. Creation is performed *before* the pool object is
    constructed, in case the pool touches the queue during initialization
    (the original code built the pool first, leaving a window where it
    could reference a queue that does not exist yet).

    Args:
        name: spider name, interpolated into the queue name.

    Returns:
        An SQSTaskPool bound to "pt-spider-serverless-<name>-queue".
    """
    pool_name = "pt-spider-serverless-%s-queue" % name
    # Ensure the queue exists before handing it to the task pool.
    sqs_util.create_if_not_existed(queue_name=pool_name)
    return SQSTaskPool(name=pool_name)


def start_initiator(metadata=None, payload=None):
    """Seed the task queue with the initial keywords task for a batch.

    Enqueues the seed task only when the queue is empty, so a restart
    cannot mix tasks from two different batches.

    Args:
        metadata: event metadata; must contain an ISO-8601 "time" field
            whose date part (before the "T") becomes the batch id.
            Note: a ``None`` metadata would raise TypeError here.
        payload: unused; kept for interface compatibility.
    """
    batch_id = metadata["time"].split("T")[0]
    init_tasks_map = {
        "keywords": [("https://www.amazon.com/s?k=mini+fridge", {"batchId": batch_id, "SpiderName": "KeywordsSpider", "tt": "Keywords", "td": "init", "tp": "last_page"}, None, None)]
    }
    task_pool = setup_task_pool(spider_name)
    count = sqs_util.check_sqs_remain_message_count(task_pool.queue_name)
    # SQS queue attributes come back as strings ("0" is truthy), but an
    # integer 0 is falsy: the original `if count and int(count) <= 0`
    # therefore refused to seed an empty queue whenever the helper
    # returned a plain int. Test explicitly against None instead.
    if count is not None and int(count) <= 0:
        init_tasks = Task.create_tasks(init_tasks_map[spider_name], parser="default", parent_result=None)
        task_pool.enqueue(init_tasks)
    else:
        logger.warning("there are still %s messages in the queue %s, please purge the queue and restart" % (count, task_pool.queue_name))


def start_worker(task_json):
    """Process one serialized task pulled from the serverless queue.

    Args:
        task_json: dict form of a task message; must carry
            ``metadata.batchId`` and ``metadata.tt``.
    """
    current_batch = task_json["metadata"]["batchId"]
    current_task = Task.from_json(task_json)
    pool = setup_task_pool(spider_name)
    spider = setup_spider(
        pool,
        batch_id=current_batch,
        task_type=current_task.metadata["tt"],
    )
    spider.listen_and_start(current_task)


if __name__ == "__main__":
    # start_initiator()
    message_body = """{
                  "url": "https://www.amazon.com/s?k=mini+fridge",
                  "metadata": {
                    "batchId": "20210801",
                    "tt": "Keywords",
                    "td": "init",
                    "tp": "last_page"
                  },
                  "parser": "default",
                  "desc": null,
                  "tags": {},
                  "parent_result": null
                }"""
    message = json.loads(message_body)
    task = Task.from_json(message)
    start_worker(task)
