# -*- coding: utf-8 -*-

from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

from graphframes import GraphFrame
from pyspark.sql.types import StructType, StructField, StringType, Row
from kgpipeline.sparkutil import current_session
from .util import merge_entity_attributes


def calc(entity, link, **kwargs):
    """
    Compute the connected-component membership of each entity and attach
    the component ID to the entity record.

    Builds a GraphFrame whose vertices are the entity ``@id`` values and
    whose edges are the distinct (``in``, ``out``) pairs from ``link``,
    then runs GraphX connected components.

    Parameters
    ----------
    entity : RDD
        RDD of entity dicts; each must carry an ``@id`` key.
    link : RDD
        RDD of link dicts; each must carry ``in`` and ``out`` keys
        referencing entity ``@id`` values.
    kwargs : dict
        Unused; accepted for pipeline-signature compatibility.

    Returns
    -------
    [entity, link] : [RDD, RDD]
        The entity and link RDDs. The component ID is attached to each
        entity under the field ``sysGraphConnectedComponents``; ``link``
        is returned unchanged.
    """
    # Key entities by @id so the component IDs can be merged back in.
    entity_rdd = entity.map(lambda x: (x["@id"], x))

    spark = current_session()
    vertices_schema = StructType([StructField("id", StringType())])
    edges_schema = StructType([StructField("src", StringType()), StructField("dst", StringType())])
    v = spark.createDataFrame(entity.map(lambda x: Row(x["@id"])), vertices_schema)
    # distinct() avoids duplicate edges inflating the graph; direction is
    # irrelevant for connected components.
    e = spark.createDataFrame(link.map(lambda x: Row(x["in"], x["out"])).distinct(), edges_schema)
    g = GraphFrame(v, e)
    # "graphx" backend does not require a checkpoint directory, unlike the
    # default "graphframes" algorithm.
    cc = g.connectedComponents(algorithm="graphx")

    # (id, component) pairs, merged onto entities under the
    # "sysGraphConnectedComponents" attribute.
    cc_rdd = cc.rdd.map(lambda x: (x["id"], x["component"]))
    entity_rdd = merge_entity_attributes(entity_rdd, cc_rdd, "sysGraphConnectedComponents")

    # Drop the @id key, returning plain entity dicts alongside the
    # untouched link RDD.
    return [entity_rdd.map(lambda x: x[1]), link]
