import re

from framework.common.pt_logger import logger
from framework.core.crawler_task import Task
from framework.core.sqs_task_pool import SQSTaskPool
from jobs.amazon.util import sqs_util
from util.eventManager import EventManager


def fregex(pattern, text, index=0):
    """Search *text* case-insensitively for *pattern*.

    :param pattern: regular expression to search for
    :param text: string to search in
    :param index: capture group to return (0 = whole match)
    :return: the matched group, or '' when there is no match
    """
    match = re.search(pattern, text, flags=re.IGNORECASE)
    return match.group(index) if match else ''


def setup_task_pool(name):
    """Build the serverless queue name for *name*, make sure the SQS queue
    exists, and return a task pool bound to it.

    :param name: task-type name used to derive the queue name
    :return: an SQSTaskPool attached to ``pt-spider-serverless-<name>-queue``
    """
    queue_name = f"pt-spider-serverless-{name}-queue"
    pool = SQSTaskPool(name=queue_name)
    sqs_util.create_if_not_existed(queue_name=queue_name)
    return pool


def start_initiator(metadata=None, payload=None):
    """
    Process a bounded number of S3 files per invocation; when pages remain,
    re-emit the same start event with the next StartingToken so the next
    invocation resumes where this one stopped.

    :param metadata: event metadata (unused here; kept for the handler signature)
    :param payload: dict with "s3pathList" (list of s3:// URIs), optional
                    "StartingToken" (S3 pagination resume token) and optional
                    "batchId" (falls back to a module-level ``batch_id``)
    :return: None
    """
    import boto3
    import zlib
    import json
    s3 = boto3.client("s3")

    # BUGFIX: batch_id used to be an implicit global that only exists when run
    # as a script, so Lambda invocations raised NameError inside the nested
    # helpers. Continuation events already carry "batchId" in the payload;
    # fall back to the module-level name for the __main__ path.
    batch_id = payload.get("batchId", globals().get("batch_id"))

    def iterate_bucket_items(bucket, prefix, starting_token):
        """Yield S3 objects under bucket/prefix up to MaxItems; if a resume
        token remains afterwards, emit a continuation event for the next run."""
        pagination_config = {
            # Hard cap of keys handled by this invocation; the rest is handed
            # to the next one via the resume token.
            # NOTE(review): the original comments claimed 5 pages / 30 files
            # but the values are 9 / 3 — confirm the intended sizes.
            'MaxItems': 9,
            'PageSize': 3,  # keys per list_objects_v2 page
            'StartingToken': starting_token
        }
        if not starting_token:
            # boto3 rejects an empty/None StartingToken; omit it entirely.
            del pagination_config["StartingToken"]

        paginator = s3.get_paginator('list_objects_v2')
        page_iterator = paginator.paginate(Bucket=bucket, Prefix=prefix, PaginationConfig=pagination_config)
        for page in page_iterator:
            if page['KeyCount'] > 0:
                for item in page['Contents']:
                    yield item
        next_starting_token = page_iterator.resume_token
        if next_starting_token:
            # Re-emit the start event so the next invocation resumes at the token.
            EventManager().pt_spider_state_changed(state="STARTED",
                                                   metadata={
                                                       "SpiderName": "AsinSpider"
                                                   },
                                                   payload={
                                                       "tt": "ProductDetails",
                                                       "batchId": batch_id,
                                                       "StartingToken": next_starting_token
                                                   })
            logger.info(f"sent next starting token {next_starting_token}")
        else:
            logger.info("end of page")

    def stream_gzip_decompress(stream):
        """Yield decompressed byte chunks from a gzip'd byte stream."""
        dec = zlib.decompressobj(32 + zlib.MAX_WBITS)  # wbits offset 32: auto-detect/skip gzip header
        for chunk in stream:
            rv = dec.decompress(chunk)
            if rv:
                yield rv

    def init_tasks(s3path, starting_token):
        """Read gzipped JSON-lines files under *s3path* and build the
        detail / review / faq task lists for every ASIN found.

        :return: tuple (detail_tasks, review_tasks, faq_tasks)
        """
        amazon_domain = "https://www.amazon.com"
        bucket = fregex(r"s3://(.+?)/(.+)", s3path, 1)
        prefix = fregex(r"s3://(.+?)/(.+)", s3path, 2)
        detail_tasks = []
        review_tasks = []
        faq_tasks = []
        for file_handler in iterate_bucket_items(bucket=bucket, prefix=prefix, starting_token=starting_token):
            logger.info(f"fetching file {file_handler['Key']} from s3")
            obj = s3.get_object(Bucket=bucket, Key=file_handler["Key"])
            # BUGFIX: join raw bytes once and decode once — the old per-chunk
            # decode could split a multi-byte UTF-8 sequence across chunks,
            # and repeated `+=` string concatenation is quadratic.
            file_content = b"".join(stream_gzip_decompress(obj["Body"])).decode("utf-8")
            json_obj_list = [json.loads(line) for line in file_content.splitlines()]

            for json_obj in json_obj_list:
                asin = json_obj["asin"]
                # Fall back to canonical URL paths when the feed leaves them empty.
                detail_url = json_obj["detail_url"] or f"/dp/{asin}"
                review_url = json_obj["review_url"] or f"/product-reviews/{asin}?sortBy=recent"
                faq_url = f"/ask/questions/asin/{asin}?sort=SUBMIT_DATE"
                detail_task_template = [(f"{amazon_domain}/{detail_url.lstrip('/')}",
                                         {"asin": asin, "batchId": batch_id, "SpiderName": "AsinSpider",
                                          "tt": "AsinDetailPage"}, None, None)]
                review_task_template = [(f"{amazon_domain}/{review_url.lstrip('/')}",
                                         {"asin": asin, "batchId": batch_id, "SpiderName": "AsinSpider",
                                          "tt": "AsinCustomerReview"}, None, None)]
                faq_task_template = [(f"{amazon_domain}/{faq_url.lstrip('/')}",
                                      {"asin": asin, "batchId": batch_id, "tt": "AsinQuestionAndAnswer"}, None, None)]
                detail_tasks += Task.create_tasks(detail_task_template, parser="dpParser", parent_result=None)
                review_tasks += Task.create_tasks(review_task_template, parser="reviewParser", parent_result=None)
                faq_tasks += Task.create_tasks(faq_task_template, parser="qaParser", parent_result=None)

        return detail_tasks, review_tasks, faq_tasks

    def init_task_pool(task_type, tasks):
        """Ensure the queue for *task_type* exists, warn if it is not empty,
        and log the tasks that would be enqueued."""
        task_pool = setup_task_pool(task_type)
        count = sqs_util.check_sqs_remain_message_count(task_pool.queue_name)
        if count and int(count) > 0:
            logger.warning("there are still %s messages in the queue %s, please purge the queue and restart" % (
                count, task_pool.queue_name))
        # NOTE(review): the actual enqueue was disabled in the original
        # (dry-run?) — kept disabled to preserve behavior; confirm intent.
        # task_pool.enqueue(tasks)
        logger.info(f"enqueue {len(tasks)} into queue")
        logger.info(tasks)

    starting_token = payload.get("StartingToken")
    logger.info(f"received starting_token {starting_token}")
    s3path_list = payload.get("s3pathList")
    init_detail_task_list = []
    init_review_task_list = []
    init_faq_task_list = []
    for s3path in s3path_list:
        temp_detail, temp_review, temp_faq = init_tasks(s3path, starting_token)
        init_detail_task_list.extend(temp_detail)
        init_review_task_list.extend(temp_review)
        init_faq_task_list.extend(temp_faq)

    # BUGFIX: the original passed init_detail_task_list to all three pools,
    # so review/faq tasks were never routed to their own queues.
    init_task_pool("AsinDetailPage", init_detail_task_list)
    init_task_pool("AsinCustomerReview", init_review_task_list)
    init_task_pool("AsinQuestionAndAnswer", init_faq_task_list)

    print("Done")


if __name__ == "__main__":
    batch_id = "2021-07-01"
    metadata = {}
    payload = {
        "s3pathList": [f"s3://pt-data-dev/staging/dedupe/{batch_id}"],
        "StartingToken": ""
    }
    start_initiator(metadata, payload)
