import org.apache.spark.sql.types.{LongType, MapType, StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}

import scala.util.Random

/**
 * TraceTransformer
 *
 * Serves point lookups: records are salted to spread keys, and the query side
 * is expected to select the non-null columns and keep the entry with the
 * latest update time. If an operator is upgraded many times, query performance
 * degrades, but the probability of further upgrades shrinks over time.
 */
class TraceTransformer(spark: SparkSession , destTable: String, saltNum:Int=1) extends AbstractTransformer(spark, destTable) {
  
  /**
   * Transform one dt/hour partition using an RDD reduceByKey pipeline:
   * parquet source -> TraceRecord -> salted key -> merge -> expanded Row -> destination table.
   *
   * @param dt     date partition value used in the source path (format assumed from path layout — TODO confirm)
   * @param hour   hour partition value used in the source path
   * @param ifTest when true, read from the `trace_feature_test` source path instead of production
   */
  def transform(dt: String, hour: String, ifTest: Boolean = false): Unit = {
    
    // Resolve the OSS source path (test vs. production).
    val pathIn = if (ifTest) 
      s"oss://risk-ml-featurestore/trace_feature_test/dt=$dt/hour=$hour" else
      s"oss://risk-ml-featurestore/trace_feature/dt=$dt/hour=$hour"

    logger.info(s"start transform $dt/$hour to $destTable, src: $pathIn")
    val df = spark.read.parquet(pathIn)
    
    // Fetch the operator column list from the destination table and broadcast it.
    val opList = getOperatorColumnsFromDestTable(destTable)
    val opListBroadcast = spark.sparkContext.broadcast(opList)
    // Broadcast saltNum so the closures below do not capture `this`,
    // which would make the task non-serializable.
    val saltNumBroadcast = spark.sparkContext.broadcast(saltNum)
    // 1. Convert each DataFrame row into a TraceRecord.
    val traceRecordRDD = df.rdd.map { row =>
      TypeConverters.rowToTraceRecord(row)
    }
    
    // 2. Aggregate by (trace_id, salt).
    // NOTE(review): Random is seeded with trace_id, so the salt is deterministic
    // per trace_id — every record of one trace_id lands in the same reduce group
    // (required to get a single output row per trace_id), but the salt therefore
    // cannot split a single hot trace_id across partitions. Confirm this matches
    // the "random salting" intent described in the class doc.
    val aggregatedRDD = traceRecordRDD
      .keyBy { record =>
        val r = new Random(record.trace_id)
        (record.trace_id, r.nextInt(saltNumBroadcast.value))
      }
      .reduceByKey(
        // NOTE(review): `++` lets acc2's entries overwrite acc1's on duplicate
        // operator keys, and reduceByKey's combine order is nondeterministic, so
        // this merge does NOT guarantee latest-updated_time-wins here; per the
        // class doc the query side is expected to pick the latest — confirm.
        // acc1's account_id, day_part and calc_time are kept arbitrarily.
        (acc1: TraceRecord, acc2: TraceRecord) =>
          TraceRecord(
            acc1.trace_id,
            acc1.account_id,
            acc1.day_part,
            acc1.operator_features ++ acc2.operator_features,
            acc1.calc_time
          )
      )
    
    // 3. Expand each merged TraceRecord into a flat output Row.
    val expandedRowRDD = aggregatedRDD.map { case (_, record) =>
      TraceTransformer.expandMapsToRow(opListBroadcast.value, record)
    }
    
    // 4. Build the output schema: fixed base columns ...
    val baseFields = Seq(
      StructField("trace_id", LongType, false),
      StructField("account_id", LongType, false),
      StructField("calc_time", LongType, false),
      StructField("day_part", LongType, false)
    )
    
    // ... plus one map column per operator (`<op>_fm`):
    // long key -> struct(value, updated_time). Key semantics not visible here.
    val opFields = opList.map { op =>
      StructField(op + "_fm", MapType(LongType, 
        StructType(Seq(
          StructField("value", StringType, true),
          StructField("updated_time", LongType, false)
      ))), true)
    }
    
    val outputSchema = StructType(baseFields ++ opFields)
    
    // 5. Assemble the result DataFrame from the expanded rows.
    val resultDF = spark.createDataFrame(expandedRowRDD, outputSchema)
    
    // 6. Write to the destination Paimon table, rebalanced by day_part.
    logger.info(s"write to paimon $destTable")
    import spark.implicits._
    resultDF.hint("rebalance", $"day_part".expr).write.mode("append").insertInto(destTable)
  }
}

object TraceTransformer {
  /**
   * Flatten a merged TraceRecord into a single output Row: the four fixed base
   * columns followed by one value per operator in `opList`, produced by
   * `Utils.transformOperatorFeatures`. Static so executor tasks can call it
   * without capturing the transformer instance.
   *
   * @param opList      operator column names read from the destination table
   * @param traceRecord merged record for one (trace_id, salt) group
   * @return Row matching the output schema built in `transform`
   */
  def expandMapsToRow(opList: List[String], traceRecord: TraceRecord): Row = {
    // Fixed leading columns, in schema order.
    val baseValues = Seq(
      traceRecord.trace_id,
      traceRecord.account_id,
      traceRecord.calc_time,
      traceRecord.day_part
    )
    // One entry per operator in opList, aligned with the dynamic schema columns.
    val operatorValues = Utils.transformOperatorFeatures(opList, traceRecord.operator_features)
    Row.fromSeq(baseValues ++ operatorValues)
  }
}