package com.niit.ODS

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types._

import scala.util.Try

object KafkaToHiveStream {

  /** Safely parses one space-delimited ad-click log line into
    * (timestamp, province, city, userid, adid).
    *
    * Malformed records (too few fields, or non-numeric fields) yield None
    * instead of throwing, so a single bad Kafka message cannot kill the
    * whole streaming query.
    *
    * @param line raw record value, expected as 5 space-separated numeric fields
    * @return Some(parsed tuple) on success, None for malformed input
    */
  private def parseLine(line: String): Option[(Long, Int, Int, Long, Int)] = {
    val parts = line.split(" ")
    if (parts.length < 5) None
    else Try((parts(0).toLong, parts(1).toInt, parts(2).toInt, parts(3).toLong, parts(4).toInt)).toOption
  }

  /** Entry point: streams ad-click events from Kafka topic `AdRealTimeLog`
    * into parquet files backing the external Hive table
    * `commerces.ods_user_ad_click`, triggering a micro-batch every 10 seconds.
    * Blocks until the streaming query terminates.
    */
  def main(args: Array[String]): Unit = {

    System.setProperty("HADOOP_USER_NAME", "root")

    // Build the SparkSession; the warehouse dir points at /warehouse on HDFS
    // (the /training/hive prefix was deliberately dropped).
    val spark = SparkSession.builder()
      .appName("KafkaToHiveAdClickStream")
      .master("local[*]")
      .config("spark.testing.memory", "512000000")
      .config("spark.hadoop.fs.defaultFS", "hdfs://192.168.10.130:9000")
      .config("hive.metastore.uris", "thrift://192.168.10.130:9083")
      .config("spark.sql.warehouse.dir", "hdfs://192.168.10.130:9000/warehouse")
      .enableHiveSupport()
      .getOrCreate()

    import spark.implicits._

    // Ensure the HDFS directory backing the external table exists.
    // (FileSystem.get returns a cached, shared instance — do not close it.)
    val hdfsPath = new Path("/warehouse/commerces/ods/ods_log/ods_user_ad_click")
    val fs = FileSystem.get(spark.sparkContext.hadoopConfiguration)
    if (!fs.exists(hdfsPath)) {
      fs.mkdirs(hdfsPath)  // creates the full path recursively
      println(s"已创建HDFS路径：${hdfsPath}")
    } else {
      println(s"HDFS路径已存在：${hdfsPath}")
    }

    // Kafka source options.
    // NOTE: key.deserializer / value.deserializer were removed — the
    // Structured Streaming Kafka source always reads raw bytes and ignores
    // consumer deserializer settings, so they were silent no-ops.
    // Kafka consumer properties must carry the "kafka." prefix to take effect.
    val kafkaParams = Map[String, String](
      "kafka.bootstrap.servers" -> "192.168.10.130:9092",
      "subscribe" -> "AdRealTimeLog",
      "kafka.group.id" -> "250715",  // NOTE(review): honored only on Spark 3.0+; drop on older versions
      "startingOffsets" -> "latest",
      "failOnDataLoss" -> "false",
      "maxOffsetsPerTrigger" -> "10000"
    )

    // Read from Kafka and parse each record defensively: malformed lines are
    // dropped by parseLine instead of crashing the query.
    val kafkaStreamDF = spark.readStream.format("kafka").options(kafkaParams).load()
    val parsedDF = kafkaStreamDF
      .selectExpr("CAST(value AS STRING)")
      .as[String]
      .flatMap(parseLine)
      .toDF("timestamp", "province", "city", "userid", "adid")

    // Create the external Hive table over the target location.
    // `timestamp` is a reserved keyword in Hive DDL, so it is backtick-quoted.
    spark.sql("USE commerces")
    spark.sql(
      """
        |CREATE EXTERNAL TABLE IF NOT EXISTS ods_user_ad_click (
        |  `timestamp` BIGINT,
        |  province INT,
        |  city INT,
        |  userid BIGINT,
        |  adid INT
        |)
        |STORED AS PARQUET
        |LOCATION '/warehouse/commerces/ods/ods_log/ods_user_ad_click'
      """.stripMargin)

    // Continuously append parquet files into the table location; the
    // checkpoint directory tracks Kafka offsets for exactly-once file output.
    val query = parsedDF
      .writeStream
      .outputMode("append")
      .format("parquet")
      .option("path", "/warehouse/commerces/ods/ods_log/ods_user_ad_click")
      .option("checkpointLocation", "/warehouse/commerces/ods/checkpoints/adclicks")
      .trigger(Trigger.ProcessingTime("10 seconds"))
      .start()

    query.awaitTermination()
    spark.stop()  // only reached after the streaming query terminates
  }
}