import ast
import json

from pyspark.sql import SparkSession
from pyspark.sql.functions import col, explode, udf
from pyspark.sql.types import StringType, MapType, ArrayType

from SparkSessionBase import SparkSessionBase


class BusinessAttributesAnalyzer(SparkSessionBase):
    """Extract, deduplicate and display the (key, value) pairs stored in the
    serialized ``attributes`` column of the Hive ``business`` table.
    """

    SPARK_URL = "yarn"
    SPARK_APP_NAME = 'BusinessAttributesAnalyzer'
    ENABLE_HIVE_SUPPORT = True

    def __init__(self):
        # NOTE(review): assumes SparkSessionBase requires no __init__ call of
        # its own and provides _create_spark_session — confirm against base.
        self.spark = self._create_spark_session()
        # Set log level to ERROR to reduce log noise.
        self.spark.sparkContext.setLogLevel("ERROR")

    def run(self):
        """Flatten ``business.attributes`` into distinct key/value rows.

        Returns:
            DataFrame with columns ``attribute_key``/``attribute_value``,
            distinct and ordered by ``attribute_key``. Also prints the
            result and its row count as a side effect.
        """
        # Read the `business` Hive table.
        business_df = self.spark.table('business')

        def parse_attributes(attributes_str):
            """Parse the serialized attributes string into a flat str->str dict.

            The column stores Python-repr style dicts (single quotes,
            True/False literals), so try ast.literal_eval first; fall back
            to the legacy quote-swapped JSON parse. Malformed or empty
            input yields {} rather than failing the job.
            """
            if not attributes_str:
                return {}
            try:
                parsed = ast.literal_eval(attributes_str)
            except (ValueError, SyntaxError):
                try:
                    # Legacy fallback: naive single->double quote swap.
                    # Fragile for values containing apostrophes, kept only
                    # for inputs that happen to be near-JSON.
                    parsed = json.loads(attributes_str.replace("'", "\""))
                except (ValueError, TypeError):
                    return {}
            if not isinstance(parsed, dict):
                return {}
            # Coerce keys/values to strings so they fit the declared
            # MapType(String, String); nested dicts become their repr
            # instead of being silently dropped by Spark.
            return {str(k): str(v) for k, v in parsed.items()}

        parse_attributes_udf = udf(parse_attributes, MapType(StringType(), StringType()))

        # Replace the raw string column with the parsed map.
        business_df_with_attributes = business_df.withColumn(
            "attributes",
            parse_attributes_udf(col("attributes"))
        )

        # Explode the map into one row per (key, value) pair.
        business_df_exploded = business_df_with_attributes.select(
            "business_id",
            "name",
            explode("attributes").alias("attribute_key", "attribute_value")
        )

        # Keep only the distinct attribute pairs.
        distinct_attributes_df = business_df_exploded.select(
            "attribute_key",
            "attribute_value"
        ).distinct()

        # Order by attribute key for readable output.
        sorted_attributes_df = distinct_attributes_df.orderBy("attribute_key")

        # Display the result.
        sorted_attributes_df.show(truncate=False)

        # Report how many distinct pairs were found.
        total_count = sorted_attributes_df.count()
        print(f"总共有 {total_count} 条不同的属性记录")

        return sorted_attributes_df


if __name__ == '__main__':
    # Script entry point: construct the analyzer job and execute it.
    BusinessAttributesAnalyzer().run()