package com.leal.hive

import com.alibaba.fastjson.{JSON, JSONObject}
import com.leal.util.{DateUtil, SparkLoggerTrait, SparkUtil}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, from_json, get_json_object, hash, lit}
import org.apache.spark.sql.types.{ArrayType, BooleanType, DoubleType, IntegerType, LongType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SaveMode, SparkSession}

import scala.collection.mutable.ArrayBuffer

/**
 * @ClassName bigdata
 * @Description 解析埋点表
 * @Date 2024/12/23 16:19
 * @Created by leal
 */
object ParseJson extends SparkLoggerTrait {

  private val logger: Logger = Logger.getLogger(this.getClass)

  /**
   * Entry point: reads raw tracking rows (table_name, content) from
   * cx_ads_safe.flu_kfk_dpp_test for the first requested date, then parses the
   * JSON-array `content` payload per target table — either distributed on the
   * executors (log-only) or sequentially on the driver (written back to Hive).
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkUtil.initSpark(enableHive = true)
    // Tables and dates to parse (hard-coded for now; could be taken from args)
    val hiveTables: String = "cx_test,cx_test1,cx_test2"
    val dates: String = "20241201,20241202"
    val hiveTableArray: Array[String] = hiveTables.split(",")
    val dateArray: Array[String] = dates.split(",")

    // Allow dynamic-partition inserts when writing back to Hive
    spark.conf.set("hive.exec.dynamic.partition", "true")
    spark.conf.set("hive.exec.dynamic.partition.mode", "nonstrict")

    // NOTE(review): only dateArray(0) is consumed — the remaining dates are
    // ignored, same as the original code; confirm whether a per-date loop is intended.
    val (y, m, d) = DateUtil.splitDate(dateArray(0))
    val tableList: String = hiveTableArray.map(t => s"'$t'").mkString(",")
    // No trailing ';' — the Spark SQL parser rejects statement terminators.
    val frame: DataFrame = spark.sql(
      s"""
         |SELECT table_name, content FROM cx_ads_safe.flu_kfk_dpp_test
         |WHERE  y = '$y'
         |AND m = '$m'
         |AND d = '$d'
         |AND table_name IN ($tableList)
         |""".stripMargin)

    // Per-table column names and positional Spark types.
    // Fixed: "cx_test" previously listed 4 fields but 5 types, so zip silently
    // dropped the last type and "scores" was typed boolean instead of array.
    val tableInfos: Map[String, (String, String)] = Map(
      "cx_test" -> ("id,name,age,isStudent,scores", "int,string,int,boolean,array"),
      "cx_test1" -> ("id,name,age,isStudent", "int,string,int,boolean"),
      "cx_test2" -> ("id,name,age", "int,string,int")
    )
    // 并发设置
    val isParallel: Boolean = true
    if (isParallel) {
      logger.info("开启多线程解析")
      val numPartitions: Int = 4
      logger.info(s"开始解析数据,hive表为${hiveTableArray.mkString(",")},日期${dateArray.mkString(",")}")
      // Parse on the executors, grouping rows of one table into the same partition.
      // NOTE(review): parsed rows are only logged, never persisted — the original
      // discarded them too; wire up a writer here if persistence is intended.
      frame.repartition(numPartitions, col("table_name"))
        .foreachPartition { partition: Iterator[Row] =>
          partition.foreach { row: Row =>
            val table: String = row.getAs[String]("table_name")
            val parsedRow: Row = parseJsonByRow(row, tableInfos(table)._1)
            logger.info(s"解析的结果为 $table -> [${parsedRow.mkString(",")}]")
          }
        }
    } else {
      logger.info("关闭多线程解析")
      // Use each table's own column list instead of one hard-coded schema for all tables.
      hiveTableArray.foreach { table: String =>
        val (fields, types) = tableInfos(table)
        parseSingleTable(frame, table, dateArray(0), fields, types)
      }
    }

    SparkUtil.closeSpark(spark)
  }

  /**
   * Parses the JSON payload of one table out of `frame` and overwrites the
   * matching Hive table. Errors are logged and swallowed so one failing table
   * does not abort the others (same best-effort contract as before).
   *
   * @param frame     raw rows (table_name, content) covering all tables
   * @param hiveTable target table name; used both as filter and as sink
   * @param dt        value written to the `dt` partition column
   * @param fields    comma-separated column names expected in the JSON payload
   *                  (defaults keep the previous hard-coded behavior)
   * @param types     comma-separated type names, positionally matching `fields`
   */
  private def parseSingleTable(frame: DataFrame,
                               hiveTable: String,
                               dt: String,
                               fields: String = "id,name,age,isStudent,scores",
                               types: String = "int,string,int,boolean,array"): Unit = {

    logger.info(s"Phase 1: 开始解析数据,hive表为$hiveTable,日期$dt")
    try {
      val filteredFrame: Dataset[Row] = frame.filter(col("table_name") === hiveTable)

      val fieldArray: Array[String] = fields.split(",")
      val schema: StructType = buildSchema(fieldArray, types.split(","))
      logger.info(s"schema 信息为 $schema")

      // `content` holds a JSON array; extract its first element and parse it
      // against the expected schema (data shape: [{"id":232,"name":"leal"}]).
      val contents: DataFrame = filteredFrame.withColumn(
        "info", from_json(get_json_object(col("content"), "$[0]"), schema))

      // Flatten info.<field> back to top-level columns named <field>.
      val selectedFrame: DataFrame = contents.select(
        fieldArray.map((name: String) => col(s"info.$name").as(name)): _*
      )

      logger.info("写入结果如下")
      // insertInto matches columns by POSITION, so the `dt` partition column
      // must be appended last to line up with the Hive table definition.
      selectedFrame.withColumn("dt", lit(dt))
        .write
        .mode(SaveMode.Overwrite)
        .insertInto(s"cx_ads_safe.$hiveTable")
    } catch {
      case e: Exception =>
        // Route the full stack trace through the logger instead of stderr.
        logger.error(s"解析数据失败,hive表为$hiveTable,日期$dt,错误信息为${e.getMessage}", e)
    }
  }

  /**
   * Builds a Spark StructType from positional (field, type-name) pairs.
   * Unknown type names degrade to string rather than failing.
   */
  private def buildSchema(fieldArray: Array[String], typeArray: Array[String]): StructType = {
    val structFields: Array[StructField] = fieldArray.zip(typeArray).map { case (field, typeName) =>
      val sparkType = typeName.toLowerCase match {
        case "string"  => StringType
        case "int"     => IntegerType
        case "long"    => LongType
        case "double"  => DoubleType
        case "boolean" => BooleanType
        case "array"   => ArrayType(StringType)
        // NOTE(review): map/struct are approximated by a single-field struct
        // with a "name" key, as in the original — confirm against real payloads.
        case t if Seq("map", "struct").contains(t) => StructType(Array(StructField("name", StringType)))
        case _         => StringType
      }
      StructField(field, sparkType, nullable = true)
    }
    StructType(structFields)
  }

  /**
   * Parses the first element of the JSON-array `content` column of `row` into
   * a [[Row]] whose values follow the order of the comma-separated `fields`.
   * Fields absent from the payload become null.
   * NOTE(review): an empty JSON array throws from `get(0)` — the caller's
   * executor task would fail; confirm empty payloads cannot occur upstream.
   */
  private def parseJsonByRow(row: Row, fields: String): Row = {
    val info: JSONObject = JSON.parseObject(JSON.parseArray(row.getAs[String]("content")).get(0).toString)
    Row.fromSeq(fields.split(",").map((field: String) => info.get(field)))
  }
}
