package cn.wudl.tags.models.rule

import cn.wudl.tags.meta.HBaseMeta
import cn.wudl.tags.tools.HBaseTools
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.expressions.UserDefinedFunction

object JobModel {

  /**
   * Rule-driven "job" profile-tag model.
   *
   * Pipeline:
   *   1. Read the business tag (level 4) and its attribute tags (level 5) from MySQL.
   *   2. Parse the level-4 rule to locate the business data source (HBase).
   *   3. Map each user's raw job code to a tag name via a broadcast lookup + UDF.
   *   4. Write the resulting (userId, job) tags back to HBase.
   *
   * @param args optional; args(0) overrides the base tag id (defaults to 321),
   *             so the same job can compute other tag families without recompiling.
   */
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = createSparkSession()
    import org.apache.spark.sql.functions._
    import spark.implicits._

    // Base tag id: CLI-overridable, defaults to the original hard-coded 321.
    val tagId: Int = if (args.nonEmpty) args(0).toInt else 321

    // Pushed-down JDBC subquery: the level-4 business tag plus its level-5
    // attribute tags (pid = tagId) in a single round trip.
    val tagTable: String =
      s"""
        |(
        |SELECT id, name, rule, level  FROM profile_tags.tbl_basic_tag WHERE id = $tagId
        |union
        |SELECT id, name, rule, level  FROM profile_tags.tbl_basic_tag WHERE pid = $tagId
        |) as tag_table
        |""".stripMargin

    val baseTagDF = spark.read.format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", "jdbc:mysql://192.168.1.140:3306/?useUnicode=true&characterEncoding=UTF-8&serverTimezone=UTC")
      .option("dbtable", tagTable)
      .option("user", "root")
      .option("password", "123456")
      .load()
    baseTagDF.printSchema()
    baseTagDF.show(1000, truncate = false)

    // Parse the level-4 tag rule ("key=value" per line) into a Map describing
    // the business data source. NOTE(review): .head() assumes exactly one
    // level-4 row exists for this tag id — confirm against the tag table.
    val tagRule: String = baseTagDF.filter($"level" === 4).head().getAs[String]("rule")
    val tagRuleMap: Map[String, String] = parseRule(tagRule)
    println("************" + tagRuleMap.mkString(","))

    // Read business data from the source declared by the rule's inType.
    // BUG FIX: the original built a RuntimeException without throwing it,
    // leaving businessDF null and failing later with an NPE instead of a
    // meaningful error. Using an if/else expression also removes the var/null.
    val businessDF: DataFrame =
      if ("hbase".equals(tagRuleMap("inType").toLowerCase)) {
        // Wrap the rule's connection properties in an HBaseMeta and scan HBase.
        val hbaseMeta: HBaseMeta = HBaseMeta.getHBaseMeta(tagRuleMap)
        HBaseTools.read(spark, hbaseMeta.zkHosts, hbaseMeta.zkPort, hbaseMeta.hbaseTable, hbaseMeta.family, hbaseMeta.selectFieldNames.split(","))
      } else {
        throw new RuntimeException("业务标签未提供数据源信息，获取不到业务数据，无法计算标签")
      }
    //businessDF.printSchema()
    //businessDF.show(100, truncate = false)

    // Level-5 attribute tags form the lookup table: rule value -> tag name.
    // Collected to the driver (small) and broadcast for use inside the UDF.
    val attrTagRuleMap: Map[String, String] = baseTagDF
      .filter($"level" === 5) // attribute tags are level 5
      .select($"rule", $"name")
      // Only two columns, so the Dataset row is a plain (rule, name) tuple.
      .as[(String, String)]
      .rdd
      .collectAsMap().toMap
    val attrTagRuleMapBroadcast = spark.sparkContext.broadcast(attrTagRuleMap)

    // UDF mapping a raw job code to its tag name.
    // NOTE(review): throws NoSuchElementException for job codes absent from
    // the rule map — confirm whether unknown codes should yield null instead.
    val job_udf: UserDefinedFunction = udf(
      (job: String) => attrTagRuleMapBroadcast.value(job)
    )

    // Apply the UDF to build the final (userId, job-tag) model.
    val modelDF: DataFrame = businessDF.select(
      $"id".as("userId"), //
      job_udf($"job").as("job") //
    )

    // Persist the profile tags to HBase (row key = userId).
    HBaseTools.write(
      modelDF, "bigdata-cdh01.itcast.cn", "2181", //
      "tbl_profile", "user", "userId"
    )

    spark.stop()
  }

  /**
   * Builds a local SparkSession with Kryo registration for the HBase I/O
   * classes shuttled through shuffles, plus Hive metastore integration.
   */
  private def createSparkSession(): SparkSession = {
    val sparkConf = new SparkConf()
      .setMaster("local[4]")
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      // Small local job: keep shuffle partitions low.
      .set("spark.sql.shuffle.partitions", "4")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .registerKryoClasses(Array(classOf[ImmutableBytesWritable], classOf[Result], classOf[Put]))
    SparkSession.builder().config(sparkConf)
      .enableHiveSupport()
      .config("hive.metastore.uris", "thrift://192.168.1.140:9083")
      .config("spark.sql.warehouse.dir", "hdfs://192.168.1.140:8020/user/hive/warehouse")
      .getOrCreate()
  }

  /**
   * Parses a multi-line "key=value" rule string into a Map.
   *
   * Blank lines are skipped, and `split("=", 2)` keeps '=' characters inside
   * values (e.g. URLs, base64) intact — the original unbounded split crashed
   * with a MatchError on such values.
   */
  private def parseRule(rule: String): Map[String, String] = {
    rule.split("\\n")
      .map(_.trim)
      .filter(_.nonEmpty)
      .map { line =>
        val Array(attrKey, attrValue) = line.split("=", 2)
        (attrKey, attrValue)
      }
      .toMap
  }

}
