package cn.itcast.tags.models.rule

import cn.itcast.tags.meta.HBaseMeta
import cn.itcast.tags.tools.HBaseTools
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StringType

object JobModel extends Logging {

  /**
   * Rule-match tag model for the "job" attribute.
   *
   * Pipeline:
   *   1. Load the business tag (id = 321) and its attribute tags (pid = 321)
   *      from MySQL table `profile_tags`.`tbl_basic_tag`.
   *   2. Parse the level-4 tag's `rule` field into a key/value map describing
   *      the business data source (expected to be HBase).
   *   3. Read the business data from HBase, map each user's `job` value to an
   *      attribute tag id via a broadcast lookup map (level-5 tags).
   *   4. Left-join with the existing user profile table `tbl_profile`, merge
   *      the new tag id into the user's existing `tagIds`, and write back.
   */
  def main(args: Array[String]): Unit = {
    // Build a local SparkSession with Kryo serialization for HBase row types
    // and Hive metastore access.
    val spark: SparkSession = {
      val sparkConf: SparkConf = new SparkConf()
        .setMaster("local[4]")
        .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .registerKryoClasses(Array(classOf[ImmutableBytesWritable], classOf[Result], classOf[Put]))

        .set("spark.sql.shuffle.partitions", "4")
        .set("hive.metastore.uris", "thrift://bigdata-cdh01.itcast.cn:9083")
        .set("spark.sql.warehouse.dir", "/user/hive/warehouse")
      val session = SparkSession.builder()
        .config(sparkConf)
        .enableHiveSupport()
        .getOrCreate()
      session
    }
    import spark.implicits._

    // Pushdown query: the business tag itself (id = 321) plus all of its
    // attribute tags (pid = 321), ordered so the level-4 row comes first.
    val tagTable: String =
      """
        |(
        |SELECT `id`,
        | `name`,
        | `rule`,
        | `level`
        |FROM `profile_tags`.`tbl_basic_tag`
        |WHERE id = 321
        |UNION
        |SELECT `id`,
        | `name`,
        | `rule`,
        | `level`
        |FROM `profile_tags`.`tbl_basic_tag`
        |WHERE pid = 321
        |ORDER BY `level` ASC, `id` ASC
        |) AS basic_tag
        |""".stripMargin
    val basicTagDF: DataFrame = spark.read.format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", "jdbc:mysql://bigdata-cdh01.itcast.cn:3306/?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC")
      .option("dbtable", tagTable)
      .option("user", "root")
      .option("password", "123456")
      .load()

    // Cache: the tag DataFrame is consumed twice (level-4 rule, level-5 map);
    // count() forces materialization eagerly.
    basicTagDF.persist(StorageLevel.MEMORY_AND_DISK).count()

    // Level-4 row holds the business tag's data-source rule, e.g.
    // "inType=hbase\nzkHosts=...\n..." (one key=value pair per line).
    val tagRule: String = basicTagDF
      .filter($"level" === 4)
      .head()
      .getAs[String]("rule")
    logInfo(s"==== 业务标签数据规则: {$tagRule} ====")

    // Parse the rule text into a Map[attrName -> attrValue].
    val ruleMap: Map[String, String] = tagRule.split("\n")
      .map {
        line =>
          val Array(attrName, attrValue) = line.trim.split("=")
          (attrName, attrValue)
      }.toMap
    logWarning(s"============ { ${ruleMap.mkString(", ")} } ===========")

    // FIX: original declared `var businessDF = null` and built a RuntimeException
    // without throwing it, so an unsupported source fell through to an NPE.
    // Now the if/else is an expression and the error is actually thrown.
    val businessDF: DataFrame =
      if ("hbase".equals(ruleMap("inType").toLowerCase)) {
        val hBaseMeta: HBaseMeta = HBaseMeta.getHBaseMeta(ruleMap)
        HBaseTools.read(
          spark, hBaseMeta.zkHosts, hBaseMeta.zkPort,
          hBaseMeta.hbaseTable, hBaseMeta.family, hBaseMeta.selectFieldNames.split(",").toSeq
        )
      } else {
        throw new RuntimeException("业务标签未提供数据源信息，获取不到业务数据，无法 计算标签")
      }

    // Level-5 rows: rule value -> attribute tag id (as strings).
    // NOTE(review): assumes the JDBC `id` column decodes as String here —
    // confirm against the MySQL schema; add a cast if it is numeric.
    val attrMap: Map[String, String] = basicTagDF
      .filter($"level" === 5)
      .select(
        $"rule",
        $"id".as("tagId")
      )
      .as[(String, String)]
      .rdd
      .collectAsMap()
      .toMap

    // Broadcast the small lookup map to executors for the UDF below.
    val attrTagMapBroadcast: Broadcast[Map[String, String]] = spark.sparkContext.broadcast(attrMap)

    // Map a business `job` value to its attribute tag id.
    // NOTE(review): an unmapped job value throws NoSuchElementException and
    // fails the task — intentional strictness kept from the original.
    val job_to_tag: UserDefinedFunction = udf(
      (job: String) => {
        attrTagMapBroadcast.value(job)
      }
    )

    val modelDF: DataFrame = businessDF.select(
      $"id".cast(StringType).as("uid"),
      job_to_tag($"job").as("tagId")
    )

    // Tag metadata no longer needed once the model is built.
    basicTagDF.unpersist()

    // Existing user profiles: userId -> comma-separated tagIds.
    val profileDF: DataFrame = HBaseTools.read(spark, "bigdata-cdh01.itcast.cn", "2181",
      "tbl_profile", "user", Seq("userId", "tagIds")
    )

    // Left join: keep every newly tagged user even without an existing profile
    // row (in which case `tagIds` is null).
    val mergeDF: DataFrame = modelDF.join(profileDF, modelDF("uid") === profileDF("userId"), "left")

    // Merge the new tag id into the existing tag list, de-duplicated.
    // FIX: made null/empty-safe — after a left join `tagIds` can be null and
    // the original `tagIds.split(...)` would NPE.
    val merge_tags_udf: UserDefinedFunction = udf(
      (tagId: String, tagIds: String) => {
        if (tagIds == null || tagIds.isEmpty) {
          tagId
        } else {
          tagIds.split(",").:+(tagId).distinct.mkString(",")
        }
      }
    )

    // FIX: original condition was `when($"tagId".isNull, ...)`, testing the
    // wrong column — users with no prior profile (null `tagIds`) hit the merge
    // UDF and crashed. The null check must be on the existing `tagIds`.
    val newProfileDF: DataFrame = mergeDF.select(
      $"uid".as("userId"),
      when($"tagIds".isNull, $"tagId")
        .otherwise(merge_tags_udf($"tagId", $"tagIds")).as("tagIds")
    )
    HBaseTools.write(
      newProfileDF, "bigdata-cdh01.itcast.cn", "2181",
      "tbl_profile", "user", "userId"
    )
    spark.stop()
  }
}
