import http
import http.client  # `import http` alone does not guarantee the http.client submodule is loaded
import json

from lxml import etree

from framework.common import util
from framework.common.pt_logger import logger
from framework.core.crawler_result import Result
from framework.core.crawler_task import Task
from jobs.amazon.spiders.coreProducts_spider.departments_map import departments_map


class CoreProductsParser(object):
    @staticmethod
    def parse(response, current_task):
        parent_result = current_task.parent_result
        if not response:  # 如果request阶段发生错误，那么response会为none，这里要优雅结束
            return
        content = response.content
        tree = etree.HTML(content)
        new_task_list = list()
        results = list()

        if "init" != current_task.metadata["td"]:  # 这里只要不是init，无论是department，rootNode还是node，都是倒数第三个为nodeId
            node_id = response.raw_url.split("/")[-3]  # 如果是rootNode，需要给result添加nodeId和名字
        else:
            node_id = "init"
        # if "rootNode" == current_task.task_types["td"]:  # 如果是rootNode，需要给result添加nodeId和名字
        #     node_id = response.url.split("/")[-3]
        # elif "department" == current_task.task_types["td"]:
        #     node_id = response.url.split("/")[-3]

        if "first_page" == current_task.metadata["tp"]:  # 只有首页进来才需要解析department或rootNode树相关，不然重复了
            current_department = tree.xpath("//li/span[@class='zg_selected']/text()")
            # current_department = tree.xpath("//li/span[@class='_p13n-zg-nav-tree-all_style_zg-selected__1SfhQ']/text()")
            current_department_result = Result.from_task(task=current_task, departmentName=current_department)
            # current_department_result.put_tag("parser_type", "department")
            current_department_result.metadata["tr"] = "browseNode_tree"
            current_department_result.items["parent"] = parent_result.items if parent_result else None
            current_department_result.items["nodeId"] = node_id

            if not current_department_result.tags.get("node_path"):
                current_department_result.tags["node_path"] = list()

            if parent_result:
                prev_node_path_list = parent_result.copy_tags().get("node_path", [])
                prev_node_path_list.append(node_id)
                current_department_result.tags["node_path"] = sorted(set(prev_node_path_list), key=prev_node_path_list.index)  # next_page会有一个重复的node_id推进来，这里简单去个重
            else:
                current_department_result.tags["node_path"].append(node_id)

            # results.append(current_department_result)  ### JM: 暂时不输出department result，重复的太多，没有必要

            if "init" == current_task.metadata["td"]:  # 如果是init进来的，则其解析出来的linkage表示的是department
                linkage_urls_metadata = current_task.copy_metadata({"td": "department"})
            elif "department" == current_task.metadata["td"]:  # 如果是department进来的，则其解析出来的linkage表示的是rootNode，rootNode就开始有nodeId了
                linkage_urls_metadata = current_task.copy_metadata({"td": "rootNode"})
            else:  # 如果是rootNode进来，则其解析出来的linkage表示的是一个正常的Node，有parentNodeId和nodeId
                linkage_urls_metadata = current_task.copy_metadata({"td": "node"})

            linkage_department_urls = [
                (item.xpath("@href"), linkage_urls_metadata, None, {"Name": util.fstring(item.xpath("text()"))}) for item in
                tree.xpath("//li/span[@class='zg_selected']/parent::li/parent::ul/ul/li/a")]

            # if "init" == current_task.metadata["td"]:  # 当前task_type=="init"，表示解析的url_list是department，只有department才需要过滤，rootNode所有的子链接都需要
            #     linkage_department_urls = list(filter(lambda d: d[3].get("Name") in departments_map, linkage_department_urls))

            # for testing purpose ==>
            # if len(linkage_department_urls) > 2:
            #     linkage_department_urls = [linkage_department_urls[0], linkage_department_urls[1]]
            # else:
            #     linkage_department_urls = linkage_department_urls
            # <==
            new_task_list += Task.create_tasks(linkage_department_urls, parser="default", parent_result=current_department_result)  # 每个department的首页，需要同时解析department和core_product

        product_panes = tree.xpath("//li[@class='zg-item-immersion']")
        product_list = list()
        for product_pane in product_panes:
            product_dict = dict()
            product_dict["nodeId"] = node_id if node_id else None
            product_list.append(product_dict)
            rank = product_pane.xpath("descendant::*/span[@class='zg-badge-text']/text()")
            product_dict["rank"] = util.fstring(rank)

            detail_url = product_pane.xpath("descendant::*/span[@class='aok-inline-block zg-item']/a[@class='a-link-normal']/@href")
            product_dict["detail_url"] = util.fstring(detail_url)
            try:
                asin_index = product_dict["detail_url"].split("/").index("dp") + 1
                asin = product_dict["detail_url"].split("/")[asin_index]
                product_dict["asin"] = asin
            except Exception as ex:
                product_dict["asin"] = ""
                logger.error("parse core-product asin error...")
                logger.error(ex)

            product_title = product_pane.xpath("descendant::*/span[@class='aok-inline-block zg-item']/a[@class='a-link-normal']/div/text()")
            product_dict["product_title"] = util.fstring(product_title)

            img_url = product_pane.xpath("descendant::*/img/@src")
            product_dict["img_url"] = util.fstring(img_url)

            img_alt = product_pane.xpath("descendant::*/img/@alt")
            product_dict["img_alt"] = util.fstring(img_alt)

            stars = product_pane.xpath("descendant::*/span[@class='a-icon-alt']/text()")
            product_dict["stars"] = util.fstring(stars)

            review_url = product_pane.xpath("descendant::*/div[@class='a-icon-row a-spacing-none']/a[@class='a-size-small a-link-normal']/@href")
            product_dict["review_url"] = util.fstring(review_url)

            review_count = product_pane.xpath("descendant::*/div[@class='a-icon-row a-spacing-none']/a[@class='a-size-small a-link-normal']/text()")
            product_dict["review_count"] = util.fstring(review_count)

            price_range = product_pane.xpath("descendant::*/span[@class='a-size-base a-color-price']/descendant::*/text()")
            product_dict["price_range"] = util.frange(price_range)

            pass
            # is_prime = xxx

        current_products_result = Result.from_task(task=current_task, products=product_list)
        current_products_result.metadata["tr"] = "core_product"

        if not current_products_result.tags.get("node_path"):
            current_products_result.tags["node_path"] = list()

        if parent_result:
            prev_node_path_list = parent_result.copy_tags().get("node_path", [])
            prev_node_path_list.append(node_id)
            current_products_result.tags["node_path"] = sorted(set(prev_node_path_list), key=prev_node_path_list.index)  # next_page会有一个重复的node_id推进来，这里简单去个重
        else:
            current_products_result.tags["node_path"].append(node_id)

        next_page_url = tree.xpath("//ul[@class='a-pagination']/li[@class='a-last']/a/@href")
        # 下面主要是创建第二页的tasks
        next_or_last_page = "next_page" if len(util.fstring(next_page_url)) > 0 else "last_page"
        current_products_result.metadata["tp"] = next_or_last_page
        results.append(current_products_result)

        new_task_list += Task.create_tasks(util.fstring(next_page_url), parser='default',
                                           parent_result=current_products_result,
                                           metadata=current_task.copy_metadata({"tp": next_or_last_page}))  # 如果是最后一页，则next_page_url为空，所以tasks也为空
        update_spider_job_status(current_task.metadata['batchId'], current_task.metadata['tt'])
        return results, new_task_list


def update_spider_job_status(job_id, spider_code):
    """
    Mark a spider job as "In progress" in DynamoDB via the analytics API.

    :param job_id: batch identifier of the crawl job (e.g. "2021-09-25")
    :param spider_code: spider class name; must be one of the keys in the
        internal mapping below
    :raises KeyError: if spider_code is not a known spider
    :return: None
    """
    # Map internal spider class names to the short codes the API expects.
    dict_spider_code = {
        "CoreProductsBestSellers": "BestSeller",
        "CoreProductsMostWishedFor": "MostWishedFor",
        "CoreProductsGiftIdeas": "GiftIdea",
        "CoreProductsMoversAndShakers": "MoversNShakers",
        "CoreProductsNewReleases": "NewRelease"
    }

    # json.dumps handles quoting/escaping correctly; the previous hand-built
    # "%s"-interpolated string produced invalid JSON for values containing
    # quotes or backslashes.
    payload = json.dumps({
        "jobId": job_id,
        "spiderCode": dict_spider_code[spider_code],
        "status": "In progress",
    })
    headers = {
        'Content-Type': 'text/plain'  # API accepts the JSON body as plain text
    }
    print("Update spider status payload:", payload)

    conn = http.client.HTTPSConnection("api.amzn-plus.com")
    try:
        conn.request("PUT", "/prod/analytics/jobs/updateSQSJob", payload, headers)
        res = conn.getresponse()
        data = res.read()
        print(data.decode("utf-8"))
    finally:
        conn.close()  # fix: the original leaked the HTTPS connection


if __name__ == "__main__":
    # payload = "{\"jobId\":\"%s\",\"spiderCode\":\"%s\",\"status\":\"In progress\"}" % ("2021-09-25", "GiftIdea")
    update_spider_job_status("2021-09-25", "CoreProductsNewReleases")

    import json
    from framework.mock import MockResponse, MockTask
    parser = CoreProductsParser()
    sampler_local_html = "./samples/giftideas_subNode1.html"
    response = MockResponse(sampler_local_html)
    parser.parse(response, MockTask({"batchId": "20210430", "tt": "BestSellers", "td": "init", "tp": "first_page"}))

    # update_spider_job_status("")