from pyspark.sql import SparkSession
from graphframes import GraphFrame


# Initialize the SparkSession (and grab its SparkContext below).
# NOTE(review): spark.home and extraClassPath are hard-coded local paths —
# they must be adjusted per machine / cluster before this runs elsewhere.

spark = (
    SparkSession.builder
    .master("local[*]")
    .appName("Spark Hive Example")
    .config("spark.home", "/Users/wangbo/opt/module/spark-3.3.2-bin-hadoop3")
    # GraphFrames jar must be on the driver classpath for `from graphframes import GraphFrame`.
    .config("spark.driver.extraClassPath",
            "/Users/wangbo/opt/module/spark-3.3.2-bin-hadoop3/jars/graphframes-0.8.1-spark3.0-s_2.12.jar")
    # .config("spark.driver.extraClassPath",
    #         "/Users/wangbo/opt/module/spark-3.3.2-bin-hadoop3/jars/postgresql-42.4.1.jar")
    .getOrCreate()
)
sc = spark.sparkContext
# A checkpoint directory is required by GraphFrames' connectedComponents()
# (used at the bottom of this script); without it the call raises an error.
sc.setCheckpointDir("/Users/wangbo/opt/module/")
# Build one sample DataFrame per adjacent ID-type pair in the identity chain:
# device_id -> user_id -> phone -> openid -> idfa -> idfv.
df1 = spark.createDataFrame(
    [("device_id1", "user_id1"), ("device_id2", "user_id2"),
     ("device_id3", "user_id3"), ("device_id4", "user_id4")],
    ["device_id", "user_id"],
)
df2 = spark.createDataFrame(
    [("user_id1", "phone1"), ("user_id2", "phone2"), ("user_id3", "phone3")],
    ["user_id", "phone"],
)
df3 = spark.createDataFrame(
    [("phone1", "openid1"), ("phone2", "openid2"), ("phone3", "openid3")],
    ["phone", "openid"],
)
df4 = spark.createDataFrame(
    [("openid1", "idfa1"), ("openid2", "idfa2"), ("openid3", "idfa3")],
    ["openid", "idfa"],
)
df5 = spark.createDataFrame(
    [("idfa1", "idfv1"), ("idfa2", "idfv2"), ("idfa3", "idfv3")],
    ["idfa", "idfv"],
)

# Walk the chain of lookup tables hop by hop, keeping only (device_id, <newest ID>)
# after each join, so the result maps device_id directly to idfv.
mapping_df = (
    df1.join(df2, on="user_id").select("device_id", "phone")
    .join(df3, on="phone").select("device_id", "openid")
    .join(df4, on="openid").select("device_id", "idfa")
    .join(df5, on="idfa").select("device_id", "idfv")
)

# Edge list for the graph: rename the two mapping columns to the src/dst names
# GraphFrames expects. DataFrame.toDF("src", "dst") does the rename directly,
# replacing the original DataFrame -> rdd.map -> toDF round trip, which left
# the optimized DataFrame plane (and its Catalyst planning) for no benefit.
edges = mapping_df.toDF("src", "dst")

# Vertex list: every distinct ID appearing on either end of an edge.
# `union` replaces the deprecated `unionAll` alias (identical behavior in Spark 3.x).
vertices = edges.selectExpr("src as id").union(edges.selectExpr("dst as id")).distinct()

# Build the identity graph from the vertex and edge DataFrames.
graph = GraphFrame(vertices, edges)

# Compute connected components: each output row pairs a vertex id with a
# `component` label shared by all mutually reachable IDs (this call requires
# the checkpoint directory configured near the top of the script).
result = graph.connectedComponents()


result.show()


spark.stop()