# -*- coding: utf-8 -*-

"""
通用的计算图谱diff的代码：Spark相关
"""

from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from typing import Set, AnyStr, Optional, Callable
from pyspark import RDD, Broadcast


def rdd_diff_by_id(rdd1, rdd2, id_field="@id"):  # type: (RDD, RDD, AnyStr) -> RDD
    """
    Compute the full difference between two JSON RDDs, keyed by entity id.

    Records present on only one side appear with ``None`` on the other side,
    courtesy of the full outer join.

    :param rdd1: RDD of JSON dicts, e.g. ``[{"@id": ""}]``
    :param rdd2: RDD of JSON dicts, e.g. ``[{"@id": ""}]``
    :param id_field: name of the id field inside each record; defaults to "@id"
    :return: pair RDD of ``id => (json1_or_None, json2_or_None)``
    """
    # Key each side by its id so they can be joined. Records missing the id
    # field will raise KeyError inside the Spark task (same as before).
    entity1_rdd = rdd1.map(lambda x: (x[id_field], x))
    entity2_rdd = rdd2.map(lambda x: (x[id_field], x))
    return entity1_rdd.fullOuterJoin(entity2_rdd)


def handle_diff(diff_rdd, ignore, handler,
                broadcast_schema=None):  # type: (RDD, Set[AnyStr], Callable, Optional[Broadcast]) -> RDD
    """
    Apply a per-record handler to a previously computed diff RDD.

    :param diff_rdd: pair RDD of ``id => (json1_or_None, json2_or_None)``
    :param ignore: field names the handler should skip
    :param handler: function applied to each diff pair,
        ``(Dict, Dict, Set[AnyStr], Optional[KgCnSchema]) -> iterable of results``;
        the schema argument is passed only when ``broadcast_schema`` is given
    :param broadcast_schema: optional Spark-broadcast KgCnSchema
    :return: RDD of the handler's flattened results
    """
    # Select the mapper once up front instead of duplicating the flatMap call;
    # named inner functions serialize fine under Spark's pickler.
    if broadcast_schema is None:
        def _apply(pair):
            _, (left, right) = pair
            return handler(left, right, ignore)
    else:
        def _apply(pair):
            _, (left, right) = pair
            return handler(left, right, ignore, broadcast_schema.value)
    return diff_rdd.flatMap(_apply)