package cn.itcast.tags.models.rule

import cn.itcast.tags.meta.HBaseMeta
import cn.itcast.tags.models.rule.GenderModel.logWarning
import cn.itcast.tags.tools.HBaseTools
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}

/**
 * 标签模型应用开发：用户职业标签
 *   1学生、2公务员、3军人、4警察、5教师、6白领，相关字段信息
 */
/**
 * Tag model application: user occupation (job) tag.
 * Occupation codes carried by the attribute-tag rules:
 * 1=student, 2=civil servant, 3=soldier, 4=police, 5=teacher, 6=white collar.
 *
 * Flow: read tag definitions from MySQL -> load business data from HBase per
 * the business-tag rule -> map each user's job code to a tag name via a UDF
 * -> write the resulting profile tags back to HBase.
 */
object JobModel extends Logging {

  def main(args: Array[String]): Unit = {
    // TODO: 1. Create the SparkSession instance
    val spark: SparkSession = {
      // a. Build SparkConf with application settings
      val sparkConf: SparkConf = new SparkConf()
        .setMaster("local[4]")
        .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
        // Use Kryo serialization; HBase record classes must be registered explicitly
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .set("spark.sql.shuffle.partitions", "4")
        .registerKryoClasses(
          Array(classOf[ImmutableBytesWritable], classOf[Result], classOf[Put])
        )
      // b. Build the session with Hive integration enabled
      SparkSession.builder()
        .config(sparkConf)
        .enableHiveSupport()
        // Hive MetaStore service address
        .config("hive.metastore.uris", "thrift://bigdata-cdh01.itcast.cn:9083")
        // Warehouse directory must match Hive's warehouse location
        .config("spark.sql.warehouse.dir", "hdfs://bigdata-cdh01.itcast.cn:8020/user/hive/warehouse")
        .getOrCreate()
    }
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // TODO: 2. Read tag data from MySQL (basic tag table: tbl_basic_tag) by business tag id.
    // id = 321 selects the business tag itself; pid = 321 selects its attribute (child) tags.
    val tagTable: String =
      """
        |(
        |select id,name,rule,level from tbl_basic_tag where id=321
        |union
        |select id,name,rule,level from tbl_basic_tag where pid=321
        |) as tag_table
        |""".stripMargin

    val basicTagDF: DataFrame = spark.read.format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", "jdbc:mysql://bigdata-cdh01.itcast.cn:3306/profile_tags?useUnicode=true&characterEncoding=UTF8&serverTimezone=UTC")
      .option("dbtable", tagTable)
      .option("user", "root")
      .option("password", "123456")
      .load()
    // Cache: this DataFrame is consumed twice (level-4 rule below, level-5 attribute
    // tags in step 4); without caching each use re-reads MySQL over JDBC.
    basicTagDF.cache()

    // TODO: 3. Fetch business data (e.g. from HBase) according to the business-tag rule
    // 3.1 The business tag (level = 4) carries the data-source rule text
    val tagRule: String = basicTagDF
      .filter($"level" === 4)
      .head()
      .getAs[String]("rule")

    // 3.2 Parse the rule text: one "key=value" pair per line
    val ruleMap: Map[String, String] = tagRule
      .split("\\n")
      .map { line =>
        val Array(attrKey, attrValue) = line.trim.split("=")
        (attrKey, attrValue)
      }
      .toMap
    logWarning(s"==============<  ${ruleMap.mkString(",")}  >============")
    // Example: inType -> hbase, zkHosts -> bigdata-cdh01.itcast.cn, zkPort -> 2181,
    // hbaseTable -> tbl_tag_users, selectFieldNames -> id,job, family -> detail

    // 3.3 Dispatch on the data-source type and load the business data.
    // BUG FIX: the RuntimeException was previously constructed but never thrown,
    // so the error branch fell through silently and businessDF stayed null,
    // crashing with an NPE below. Also replaced var + null with a val assigned
    // from the if/else expression.
    val businessDF: DataFrame =
      if ("hbase".equals(ruleMap("inType").toLowerCase)) {
        // Wrap the data-source settings from the rule into an HBaseMeta object
        val hbaseMeta: HBaseMeta = HBaseMeta.getHBaseMeta(ruleMap)
        HBaseTools.read(
          spark, hbaseMeta.zkHosts, hbaseMeta.zkPort, hbaseMeta.hbaseTable,
          hbaseMeta.family, hbaseMeta.selectFieldNames.split(",")
        )
      } else {
        throw new RuntimeException("业务标签未提供数据源信息，获取不到业务数据，无法计算标签")
      }
    businessDF.printSchema()
    businessDF.show(100, truncate = false)

    // TODO: 4. Combine business data with attribute tags: rule-match tagging
    // 4.1 Attribute-tag rules (level = 5): rule value -> tag name
    val attrTagRuleMap: Map[String, String] = basicTagDF
      .filter($"level" === 5)
      .select($"rule", $"name")
      .as[(String, String)] // encode each row as a (rule, name) tuple
      .rdd
      .collectAsMap()
      .toMap
    // Broadcast so all tasks in an executor share one copy instead of one per task
    val attrTagRuleMapBroadcast: Broadcast[Map[String, String]] =
      spark.sparkContext.broadcast(attrTagRuleMap)

    // 4.2 UDF mapping a job code to its tag name.
    // NOTE(review): a job code absent from the rule map throws NoSuchElementException
    // and fails the job — confirm whether unknown codes should map to a default tag.
    val job_udf: UserDefinedFunction = udf(
      (job: String) => attrTagRuleMapBroadcast.value(job)
    )

    // 4.3 Apply the UDF: tag each user by the job field
    val modelDF: DataFrame = businessDF.select(
      $"id".as("userId"),
      job_udf($"job").as("job")
    )
    modelDF.printSchema()
    modelDF.show(100, truncate = false)

    // TODO: 5. Save the tags to the HBase profile table: tbl_profile
    // NOTE(review): zk host "bigdata-cdh01" differs from the FQDN used elsewhere
    // ("bigdata-cdh01.itcast.cn") — verify it resolves in the target environment.
    HBaseTools.write(
      modelDF, "bigdata-cdh01", "2181", "tbl_profile", "user", "userId"
    )
    spark.stop()
  }

}
