package task1.clustertask

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, count, max, to_timestamp, when}

object ClusterUserConversionFunnel {

  /**
   * Spark job that computes a per-user "purchase desire" score: for each user,
   * the ratio of browsed items that received some follow-up behavior
   * (behavior_type != 1) on the same item at a later time, over the user's
   * total number of browse events.
   *
   * Optional positional arguments (each falls back to the original hard-coded
   * value, so existing invocations are unchanged):
   *   args(0) - input CSV path
   *   args(1) - output path for per-(user, item) behavior flags
   *   args(2) - output path for per-user purchase-desire scores
   *   args(3) - Spark master URL (e.g. "local[*]" for local testing)
   */
  def main(args: Array[String]): Unit = {
    // Cluster environment configuration, overridable from the command line.
    val inputPath            = if (args.length > 0) args(0) else "file:///home/spark-stand/input/merged.csv"
    val itemBehaviorOutput   = if (args.length > 1) args(1) else "file:///home/spark-stand/output/user_item_behavior.csv"
    val purchaseDesireOutput = if (args.length > 2) args(2) else "file:///home/spark-stand/output/user_purchase_desire.csv"
    val masterUrl            = if (args.length > 3) args(3) else "spark://hai41:7077"

    val spark = SparkSession.builder()
      .appName("UserBehaviorAnalysis-Cluster")
      .master(masterUrl) // cluster mode by default
      .getOrCreate()

    import spark.implicits._

    // Load raw events; "time" is formatted at hour granularity ("yyyy-MM-dd HH").
    val data = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv(inputPath)
      .withColumn("timestamp", to_timestamp(col("time"), "yyyy-MM-dd HH"))

    // Browse events (behavior_type == 1), keyed by user and item.
    val browseData = data.filter($"behavior_type" === 1)
      .select("user_id", "item_id", "timestamp")
      .withColumnRenamed("timestamp", "browse_time")

    // All non-browse events for the same (user, item) pairs.
    val otherBehaviorData = data.filter($"behavior_type" =!= 1)
      .select("user_id", "item_id", "timestamp", "behavior_type")

    // Left-join so browsed items with NO follow-up behavior are retained
    // (they carry a null right-side "timestamp").
    //
    // BUG FIX: the original code applied .filter($"timestamp" > $"browse_time")
    // AFTER this left join. A null timestamp never satisfies ">", so every
    // browsed item without follow-up behavior was silently dropped - the left
    // join degenerated into an inner join and has_other_behavior could never
    // be 0. The time condition now lives inside the aggregation below instead.
    val joinedData = browseData.join(
      otherBehaviorData,
      Seq("user_id", "item_id"),
      "left_outer"
    )

    // has_other_behavior = 1 if ANY other behavior on the item happened strictly
    // after some browse of it, else 0. max() over 0/1 flags implements "any";
    // null timestamps (no follow-up) fall into otherwise(0).
    // Cached because it is both written out and re-aggregated below.
    val itemBehaviorResult = joinedData.groupBy("user_id", "item_id")
      .agg(
        max(when($"timestamp" > $"browse_time", 1).otherwise(0)).alias("has_other_behavior")
      )
      .cache()

    // Total browse EVENTS per user (repeat browses of one item all count).
    val totalBrowseCounts = browseData.groupBy("user_id")
      .agg(count("*").alias("total_browse_count"))

    // Number of distinct browsed items that received follow-up behavior.
    val itemsWithBehavior = itemBehaviorResult.filter($"has_other_behavior" === 1)
      .groupBy("user_id")
      .agg(count("item_id").alias("items_with_other_behavior"))

    // purchase_desire = items-with-follow-up / total browse events.
    // NOTE(review): the numerator counts distinct items while the denominator
    // counts browse events; confirm this mixed definition is intended.
    val purchaseDesireResult = totalBrowseCounts.join(
      itemsWithBehavior,
      Seq("user_id"),
      "left_outer"
    ).withColumn("items_with_other_behavior",
      when($"items_with_other_behavior".isNull, 0).otherwise($"items_with_other_behavior")
    ).withColumn("purchase_desire",
      $"items_with_other_behavior" / $"total_browse_count"
    )

    // Persist both results as header-bearing CSV directories, overwriting
    // any previous run's output.
    itemBehaviorResult.write
      .option("header", "true")
      .mode("overwrite")
      .csv(itemBehaviorOutput)

    purchaseDesireResult.write
      .option("header", "true")
      .mode("overwrite")
      .csv(purchaseDesireOutput)

    println("集群模式运行完成，结果保存在：")
    println(s"- ${itemBehaviorOutput}")
    println(s"- ${purchaseDesireOutput}")

    spark.stop()
  }
}
