package cn.itcast.tags.models.rule

import cn.itcast.tags.meta.HBaseMeta
import cn.itcast.tags.tools.HBaseTools
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
 * Tag-model job: computes the user gender tag (rule-match type) and
 * writes the result to the profile tag table in HBase.
 */
object GenderModel extends Logging{
  /*
     Reference: tag rows in tbl_basic_tag that drive this job.
     Level-4 (business tag) rule — data-source descriptor:
        Gender,,"inType=hbase
              zkHosts=bigdata-cdh01.itcast.cn
              zkPort=2181
              hbaseTable=tbl_tag_users
              family=detail
              selectFieldNames=id,gender"
     Level-5 (attribute tags) — name,,rule:
        Male,,1
        Female,,2
   */

  /**
   * Driver entry point.
   *
   * Pipeline: build SparkSession -> read tag metadata from MySQL ->
   * parse the level-4 rule -> load business data from HBase ->
   * join business data with level-5 attribute rules -> write the
   * resulting (userId, gender) tags back to HBase.
   */
  def main(args: Array[String]): Unit = {
    // TODO: 1. Create the SparkSession instance
    val spark: SparkSession = {
      // 1.a. SparkConf with application settings
      val sparkConf = new SparkConf()
        .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
        .setMaster("local[4]")
        .set("spark.sql.shuffle.partitions", "4")
        // Kryo serialization is required for the HBase read/write classes
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .registerKryoClasses(
          Array(classOf[ImmutableBytesWritable], classOf[Result], classOf[Put])
        )
      // 1.b. Build the session via the builder pattern
      val session = SparkSession.builder().config(sparkConf).getOrCreate()
      // 1.c. Return the session instance
      session
    }

    import spark.implicits._

    // TODO: 2. Read tag metadata from MySQL (base tag table: tbl_basic_tag)
    //          by business-tag id: the tag itself (id=318) plus its children (pid=318).
    val tagTable:String=
      """
        |(
        |select id,name,rule,level from tbl_basic_tag where id=318
        |union
        |select id,name,rule,level from tbl_basic_tag where pid=318
        |) as tag_table
        |""".stripMargin

    val basicTagDF: DataFrame = spark.read.format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", "jdbc:mysql://bigdata-cdh01.itcast.cn:3306/profile_tags?useUnicode=true&characterEncoding=UTF8&serverTimezone=UTC")
      .option("dbtable", tagTable)
      .option("user", "root")
      .option("password", "123456")
      .load()

    basicTagDF.printSchema()
    basicTagDF.show(10,truncate = false)

    // TODO: 3. Load the business data described by the business-tag rule (HBase)
    // 3.1 Extract the rule of the level-4 (business) tag
    val tagRule: String = basicTagDF
      .filter($"level" === 4) // only the business tag itself
      .head()
      .getAs[String]("rule")
    //logWarning(s"==============<  $tagRule  >============")

    // 3.2 Parse the rule into key/value pairs, one "key=value" per line.
    //     Split with limit 2 so values containing '=' are preserved intact.
    val ruleMap: Map[String, String] = tagRule
      .split("\\n")
      .map {
        line =>
          val Array(attrKey, attrValue) = line.trim.split("=", 2)
          (attrKey, attrValue)
      }
      .toMap
    logWarning(s"==============<  ${ruleMap.mkString(",")}  >============")
//inType -> hbase,zkHosts -> bigdata-cdh01.itcast.cn,zkPort -> 2181,hbaseTable -> tbl_tag_users,selectFieldNames -> id,gender,family -> detail

    // 3.3 Dispatch on the data-source type (inType) and load the business data
    var businessDF:DataFrame = null
    if("hbase".equals(ruleMap("inType").toLowerCase)){
      // Wrap the rule's data-source fields into an HBaseMeta descriptor
      val hbaseMeta: HBaseMeta = HBaseMeta.getHBaseMeta(ruleMap)
      businessDF = HBaseTools.read(
        spark, hbaseMeta.zkHosts, hbaseMeta.zkPort, hbaseMeta.hbaseTable,
        hbaseMeta.family, hbaseMeta.selectFieldNames.split(",")
      )
    }else{
      // BUGFIX: the exception was previously constructed but never thrown,
      // letting execution fall through to an NPE on businessDF below.
      throw new RuntimeException("业务标签未提供数据源信息，获取不到业务数据，无法计算标签")
    }
    businessDF.printSchema()
    businessDF.show(100,truncate = false)


    // TODO: 4. Combine business data with attribute tags: rule-match tagging
    // 4.1 Attribute (level-5) tag rules: rule value -> tag name
    val attrTagRule: DataFrame = basicTagDF
      .filter($"level" === 5)
      .select($"rule", $"name")

    // 4.2 Equi-join business data to attribute rules on the gender code
    val modelDF: DataFrame = businessDF.join(
      attrTagRule,
      businessDF("gender") === attrTagRule("rule"),
      "inner"
    ).select(
      $"id".as("userId"),
      $"name".as("gender")
    )

    modelDF.printSchema()
    modelDF.show(800,false)

    // TODO: 5. Persist the tags to the HBase profile table -> tbl_profile
    // NOTE(review): zkHosts here is "bigdata-cdh01" while the rule/JDBC URL use
    // "bigdata-cdh01.itcast.cn" — confirm the short hostname resolves in this env.
    HBaseTools.write(
      modelDF,"bigdata-cdh01","2181","tbl_profile","user","userId"
    )

    spark.stop()
  }
}
