package com.niit.DWD

import java.text.SimpleDateFormat
import java.util.Date

import scala.util.control.NonFatal

import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{SaveMode, SparkSession}

/**
 * Daily ODS → DWD load for ad-click events.
 *
 * Joins raw ad clicks (`ods_user_ad_click`) with user visit actions
 * (`ods_user_visit_action`) for one date, applies basic quality filters,
 * and appends the result into the partitioned Parquet table
 * `commerces.dwd_user_ad_click` (partition column: click_date).
 *
 * Usage: first CLI argument is the target date (yyyy-MM-dd); defaults to today.
 * Exits with status 1 on failure, after the SparkSession has been stopped.
 */
object DwdUserAdClick {
  def main(args: Array[String]): Unit = {
    // Quiet Spark/Akka internals; job progress goes through our own logger.
    Logger.getLogger("org").setLevel(Level.WARN)
    Logger.getLogger("akka").setLevel(Level.WARN)
    val logger = Logger.getLogger(getClass.getName)

    // Target date (yyyy-MM-dd): first CLI arg, defaulting to today.
    val dt = if (args.length > 0) args(0) else {
      val sdf = new SimpleDateFormat("yyyy-MM-dd")
      sdf.format(new Date())
    }
    logger.info(s"处理日期: $dt")

    // HDFS configuration (adjust for the target environment).
    val hdfsUri = "hdfs://192.168.10.130:9000"
    val tableLocation = s"$hdfsUri/training/hive/warehouse/commerces/dwd/dwd_user_ad_click"

    // Build the SparkSession (remove master() in production — let spark-submit decide).
    val spark = SparkSession.builder()
      .appName("ODS to DWD: dwd_user_ad_click")
      .master("local[*]")
      .config("hive.metastore.uris", "thrift://192.168.10.130:9083")
      .config("spark.hadoop.fs.defaultFS", hdfsUri)
      .enableHiveSupport()
      .getOrCreate()

    // Record failure instead of calling System.exit inside catch:
    // System.exit never returns, so the original skipped the finally block
    // and left the SparkSession running on error.
    var failed = false

    try {
      spark.sql("USE commerces")

      // Required for dynamic-partition INSERTs via insertInto.
      logger.info("设置动态分区模式为nonstrict...")
      spark.sql("set hive.exec.dynamic.partition=true")
      spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")

      // Clean up ONLY this run's partition directory so the job is idempotent
      // for the target date. (The original deleted the whole table location,
      // silently destroying every other date's partitions on each run.)
      val fs = FileSystem.get(spark.sparkContext.hadoopConfiguration)
      val tablePath = new Path(tableLocation)
      val partitionPath = new Path(s"$tableLocation/click_date=$dt")
      if (fs.exists(partitionPath)) {
        logger.warn(s"发现残留分区目录，删除: $partitionPath")
        fs.delete(partitionPath, true)
      }
      if (!fs.exists(tablePath)) {
        fs.mkdirs(tablePath)
        // NOTE(review): relies on FsPermission's octal-string constructor
        // parsing "775" as rwxrwxr-x — confirm on this Hadoop version.
        fs.setPermission(tablePath, new org.apache.hadoop.fs.permission.FsPermission("775"))
      }

      // 1. Create the DWD table if missing (explicit Parquet SerDe, snappy).
      logger.info("创建DWD表 dwd_user_ad_click（Parquet格式）")
      spark.sql(
        s"""
           |CREATE EXTERNAL TABLE IF NOT EXISTS dwd_user_ad_click (
           |  ad_click_time BIGINT COMMENT '广告点击时间戳（毫秒）',
           |  user_id BIGINT COMMENT '用户ID',
           |  ad_id INT COMMENT '广告ID',
           |  ad_position_id BIGINT COMMENT '广告位置ID（页面ID）',
           |  province_id INT COMMENT '省份ID',
           |  city_id INT COMMENT '城市ID',
           |  action_time STRING COMMENT '用户行为时间（原始格式）'
           |)
           |PARTITIONED BY (click_date STRING)
           |ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
           |STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
           |OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
           |LOCATION '$tableLocation'
           |TBLPROPERTIES (
           |  'parquet.compression'='snappy',
           |  'transient_lastDdlTime'='${System.currentTimeMillis()}'
           |)
        """.stripMargin)

      // 2. Join ODS clicks with visit actions: same user, same date, and the
      //    click within 30 s of the visit action (timestamps are in millis).
      logger.info(s"关联ODS表，读取 $dt 的广告点击数据...")
      val joinedData = spark.sql(
        s"""
           |SELECT
           |  oc.timestamp AS ad_click_time,
           |  oc.userid AS user_id,
           |  oc.adid AS ad_id,
           |  ov.page_id AS ad_position_id,
           |  oc.province AS province_id,
           |  oc.city AS city_id,
           |  ov.action_time AS action_time
           |FROM ods_user_ad_click oc
           |INNER JOIN ods_user_visit_action ov
           |  ON oc.userid = ov.user_id
           |  AND date(ov.action_time) = '$dt'
           |  AND abs(oc.timestamp - unix_timestamp(ov.action_time) * 1000) <= 30000
           |WHERE oc.timestamp IS NOT NULL
           |  AND oc.userid IS NOT NULL
           |  AND oc.adid IS NOT NULL
           |  AND ov.page_id IS NOT NULL
           |  AND oc.city IS NOT NULL
           |  AND date(from_unixtime(oc.timestamp / 1000)) = '$dt'
        """.stripMargin)

      // 3. Quality filters: drop rows with non-positive keys.
      val validData = joinedData
        .filter("ad_click_time > 0")
        .filter("user_id > 0")
        .filter("ad_id > 0")
        .filter("ad_position_id > 0")

      // 4. Add the static partition column; it must be the LAST column
      //    because insertInto matches Hive columns positionally.
      val validDataWithPartition = validData.withColumn("click_date", lit(dt))

      // Cache so the join is computed once across the two counts and the write
      // (the original recomputed it up to three times).
      joinedData.cache()
      val total = joinedData.count()
      val valid = validDataWithPartition.count()
      logger.info(s"数据质量校验：原始数据 $total 条，有效数据 $valid 条，无效数据 ${total - valid} 条")
      if (valid == 0) {
        logger.warn(s"$dt 无有效广告点击数据，跳过写入")
      } else {
        // 5. Append into the partitioned DWD table (dynamic partitioning).
        logger.info(s"写入 $dt 分区数据到 dwd_user_ad_click...")
        validDataWithPartition.write
          .mode(SaveMode.Append)
          .insertInto("commerces.dwd_user_ad_click")

        // 6. Re-sync partition metadata with the directories on HDFS.
        logger.info("修复分区元数据...")
        spark.sql("MSCK REPAIR TABLE dwd_user_ad_click")
        logger.info(s"$dt 广告点击数据处理完成！写入 ${valid} 条有效数据")
      }
      joinedData.unpersist()

    } catch {
      // NonFatal lets OutOfMemoryError / InterruptedException propagate.
      case NonFatal(e) =>
        // logger.error(msg, e) already records the stack trace; no printStackTrace needed.
        logger.error(s"处理失败: ${e.getMessage}", e)
        failed = true
    } finally {
      spark.stop()
      logger.info("SparkSession已关闭")
    }

    // Exit AFTER cleanup so spark.stop() always runs, even on failure.
    if (failed) System.exit(1)
  }
}