from pyspark.sql import SparkSession

# Build (or reuse) the SparkSession that drives this job.
spark = (
    SparkSession.builder
    .appName("HBaseConnectionExample")
    .getOrCreate()
)

# HBase connection settings.
hbase_host = "192.168.139.136"  # ZooKeeper quorum host for the HBase cluster
hbase_table = "hot_search"      # source table to scan

# Column mapping for the HBase table: row key plus the "cf" column-family
# qualifiers, all treated as strings.
schema = "row_key STRING, cf:hot_content STRING, cf:search_link STRING, cf:ranking_time STRING, cf:highest_rank STRING, cf:duration STRING, cf:duration_number STRING, cf:search_index_peak STRING, cf:platform STRING, cf:date_number STRING"

# Hadoop/HBase job configuration consumed by TableInputFormat:
# which cluster to contact, which table to read, and which columns to scan.
hbase_conf = {
    "hbase.zookeeper.quorum": hbase_host,
    "hbase.mapreduce.inputtable": hbase_table,
    "hbase.mapreduce.scan.columns": "cf:hot_content cf:search_link cf:ranking_time cf:highest_rank cf:duration cf:duration_number cf:search_index_peak cf:platform cf:date_number"
}

# Spark-examples converters that turn the Hadoop key (ImmutableBytesWritable)
# and value (Result) objects into plain Python strings on the PySpark side.
keyConv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
valueConv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"

# Load the HBase table as an RDD of (row_key, converted_result_string) pairs
# through the Hadoop TableInputFormat, using the string converters above.
hbase_rdd = spark.sparkContext.newAPIHadoopRDD(
    "org.apache.hadoop.hbase.mapreduce.TableInputFormat",
    "org.apache.hadoop.hbase.io.ImmutableBytesWritable",
    "org.apache.hadoop.hbase.client.Result",
    keyConverter=keyConv,
    valueConverter=valueConv,
    conf=hbase_conf,
)


def _matches_target(record):
    """Return True when the record's second ';'-separated field equals the
    target hot-search title.

    Fix: the original lambda indexed field [1] unconditionally, so any
    malformed/short row raised IndexError on the executors; such rows are
    now simply filtered out.
    """
    fields = record[1].split(';')
    return len(fields) > 1 and fields[1] == "男子在新疆滑雪摔伤抢救无效死亡"


# Keep only the matching rows and print each one on the executors.
filtered_rdd = hbase_rdd.filter(_matches_target)
filtered_rdd.foreach(print)

# NOTE(review): a commented-out alternative read path using the HBase Spark
# connector ("org.apache.hadoop.hbase.spark" DataFrame source with the
# `schema` column mapping) was removed as dead code; recover it from version
# control if that approach is ever needed.

# Release the Spark resources held by this job.
spark.stop()
