from pyspark.sql import DataFrame, SparkSession, dataframe
from pyspark.sql.functions import from_json
from pyspark.sql.types import StructType, StringType, IntegerType, LongType, BooleanType, DoubleType



# Spark session with Hive support; the Kafka connector jar is resolved at
# startup through spark.jars.packages.
_session_conf = {
    "spark.sql.shuffle.partitions": "4",
    "hive.metastore.uris": "thrift://localhost:9083",
    "spark.jars.packages": "org.apache.spark:spark-sql-kafka-0-10_2.12:3.2.2",
}
_builder = SparkSession.builder.appName('smart_fina').master("local[*]")
for _key, _value in _session_conf.items():
    _builder = _builder.config(_key, _value)
spark = _builder.enableHiveSupport().getOrCreate()

# A structured stream has three stages: input, transform, output.
# Input: read Kafka messages as a streaming DataFrame (the unstructured
# DStream API would yield a DStream instead; structured reads yield a DataFrame).
# Example input record: {"username":"yefei","age":40}
df = (
    spark.readStream
    .format("kafka")
    .option("kafka.bootstrap.servers", "localhost:9092")
    .option("subscribe", "test-topic")
    .load()
)

# Transform: parse the raw Kafka payload.
# At this point "words" holds a single unstructured string column named "value".
words = df.selectExpr("CAST(value AS STRING)")

# Envelope schema of a MySQL binlog event. The per-row payload stays a JSON
# string inside "data"/"old" and is parsed later with a table-specific schema.
schema = (
    StructType()
    .add("database", StringType())
    .add("table", StringType())
    .add("type", StringType())
    .add("ts", LongType())
    .add("xid", LongType())
    .add("commit", BooleanType())
    .add("data", StringType())
    .add("old", StringType())
)

# "data" is a structured DataFrame whose columns follow the binlog envelope above.
data = words.select(from_json("value", schema).alias("data")).select("data.*")

# Schema of the JSON row payload (binlog "data" field) for the
# hw_facility_toilet_detail table: per-facility sensor readings.
_toilet_detail_fields = [
    ("id", IntegerType()),
    ("code", StringType()),
    ("kwh", DoubleType()),
    ("water_meter", DoubleType()),
    ("temperature", DoubleType()),
    ("humidity", DoubleType()),
    ("pm2_5", IntegerType()),
    ("pm10", IntegerType()),
    ("co2", IntegerType()),
    ("nh3", IntegerType()),
    ("h2s", IntegerType()),
    ("passenger_volume", IntegerType()),
    ("date", IntegerType()),
    ("time", IntegerType()),
]
schema_hw_facility_toilet_detail = StructType()
for _field_name, _field_type in _toilet_detail_fields:
    schema_hw_facility_toilet_detail = schema_hw_facility_toilet_detail.add(_field_name, _field_type)

def foreachBatch(df: DataFrame, batch_id):
    """Route one micro-batch of parsed binlog rows into the matching Hive table.

    Invoked by Structured Streaming once per trigger interval. ``df`` carries
    the binlog envelope columns (database, table, type, ts, xid, commit,
    data, old); the JSON string in ``data`` is expanded with the
    table-specific schema before being appended to Hive.

    :param df: batch DataFrame of binlog envelope rows
    :param batch_id: monotonically increasing batch number supplied by Spark
    """
    print(f"第{batch_id}批数据")
    df.show()
    # Iterate over DISTINCT table names instead of every row: the original
    # code wrote the whole batch once per matching row, duplicating data in
    # Hive, and referenced `res` outside the match branch, raising NameError
    # whenever a batch ended on a non-matching row.
    for row in df.select("table").distinct().collect():
        table = row["table"]  # table name from the binlog envelope
        print(table)
        if table == "hw_facility_toilet_detail":
            # Keep only this table's rows, then expand the JSON payload.
            res = df.filter(df["table"] == table) \
                .select(from_json("data", schema_hw_facility_toilet_detail).alias("res")) \
                .select("res.*")
            res.write.format("hive").mode("append").saveAsTable('toilet.ods_hw_facility_toilet_detail')
            res.show()
        else:
            # No schema registered for this table — skip it (best-effort).
            # NOTE(review): "schame" typo kept verbatim from the original message.
            print("没有匹配的schame")
            #raise Exception("没有匹配的schame")
# Output: run a 5-second micro-batch trigger, handing every batch to
# foreachBatch, which appends the parsed rows into Hive.
stream_writer = data.writeStream \
    .foreachBatch(foreachBatch) \
    .outputMode("append") \
    .trigger(processingTime="5 seconds")
query = stream_writer.start()

query.awaitTermination()
