package cn.itcast.tags.models

import cn.itcast.tags.meta.HBaseMeta
import cn.itcast.tags.models.rule.JobModel.logWarning
import cn.itcast.tags.tools.HBaseTools
import org.apache.hadoop.hbase.client.{Put, Result}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.storage.StorageLevel

import scala.util.control.NonFatal

/**
 * 标签模型开发基类，各个标签模型继承此类，实现其中打标签方法doTag
 */
/**
 * Base trait for tag model development. Each concrete tag model extends this
 * trait and implements the tagging method [[doTag]]; [[executeModel]] drives
 * the fixed pipeline: init -> load tag data -> load business data -> tag -> save.
 */
trait BasicModel extends Logging {

  // Shared SparkSession; created by init(), released by close().
  var spark: SparkSession = _

  /**
   * Build the SparkSession used by the whole pipeline: local[4] master,
   * Kryo serialization (registering the HBase row classes that get shuffled)
   * and Hive integration via the remote MetaStore.
   */
  def init(): Unit = {
    // a. SparkConf with application-level settings
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[4]")
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      // Use Kryo serialization and register the HBase classes moved between stages
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.sql.shuffle.partitions", "4")
      .registerKryoClasses(
        Array(classOf[ImmutableBytesWritable], classOf[Result], classOf[Put])
      )

    // b. Builder pattern: create (or reuse) the SparkSession instance
    spark = SparkSession.builder()
      .config(sparkConf)
      // Enable Hive integration: table metadata is read from the Hive MetaStore service
      .enableHiveSupport()
      .config("hive.metastore.uris", "thrift://bigdata-cdh01.itcast.cn:9083")
      // Warehouse directory must point at Hive's warehouse location on HDFS
      .config( "spark.sql.warehouse.dir", "hdfs://bigdata-cdh01.itcast.cn:8020/user/hive/warehouse" )
      .getOrCreate()
  }

  /**
   * Load the tag definitions for the given tag id from the MySQL table
   * tbl_basic_tag: the business tag row itself (id = tagId) together with
   * its child attribute-tag rows (pid = tagId).
   *
   * @param tagId id of the business tag (level-4 children are fetched via pid)
   * @return DataFrame with columns id, name, rule, level
   */
  def getTagData(tagId: Long): DataFrame = {
    // Push the union down to MySQL as an inline subquery
    val tagTable: String =
      s"""
        |(
        |select id,name,rule,level from tbl_basic_tag where id=${tagId}
        |union
        |select id,name,rule,level from tbl_basic_tag where pid=${tagId}
        |) as tag_table
        |""".stripMargin

    spark.read.format("jdbc")
      .option("driver", "com.mysql.jdbc.Driver")
      .option("url", "jdbc:mysql://bigdata-cdh01.itcast.cn:3306/profile_tags?useUnicode=true&characterEncoding=UTF8&serverTimezone=UTC")
      .option("dbtable", tagTable)
      .option("user", "root")
      .option("password", "123456")
      .load()
  }

  /**
   * Load the business data described by the tag's rule, e.g. read a table
   * from HBase. The rule of the level-4 tag row is a newline-separated list
   * of key=value pairs, e.g.:
   * inType=hbase, zkHosts=..., zkPort=2181, hbaseTable=tbl_tag_users,
   * selectFieldNames=id,gender, family=detail
   *
   * @param tagDF tag data loaded by getTagData
   * @return business data read from the configured source
   * @throws RuntimeException when the rule provides no supported inType
   */
  def getBusinessData(tagDF: DataFrame): DataFrame = {
    import tagDF.sparkSession.implicits._

    // 3.1 Take the rule of the business tag itself (level 4 row)
    val tagRule: String = tagDF
      .filter($"level" === 4)
      .head()
      .getAs[String]("rule")

    // 3.2 Parse the rule: one "key=value" pair per line
    val ruleMap: Map[String, String] = tagRule
      .split("\\n")
      .map { line =>
        val Array(attrKey, attrValue) = line.trim.split("=")
        (attrKey, attrValue)
      }
      .toMap
    logWarning(s"==============<  ${ruleMap.mkString(",")}  >============")

    // 3.3 Dispatch on the data source type; only HBase is supported so far.
    // BUGFIX: the original built the RuntimeException without throwing it, so an
    // unsupported inType silently returned null; a missing inType key threw an
    // unhelpful NoSuchElementException. Both now fail with the explicit message.
    if (ruleMap.get("inType").exists(_.equalsIgnoreCase("hbase"))) {
      // Wrap the rule's data source attributes into an HBaseMeta object
      val hbaseMeta: HBaseMeta = HBaseMeta.getHBaseMeta(ruleMap)
      HBaseTools.read(
        spark, hbaseMeta.zkHosts, hbaseMeta.zkPort, hbaseMeta.hbaseTable,
        hbaseMeta.family, hbaseMeta.selectFieldNames.split(",")
      )
    } else {
      throw new RuntimeException("业务标签未提供数据源信息，获取不到业务数据，无法计算标签")
    }
  }

  /**
   * Compute the profile tags for the business data. Implemented by each
   * concrete tag model.
   *
   * @param businessDF business data loaded by getBusinessData
   * @param tagDF      tag definitions loaded by getTagData
   * @return tagged result to be persisted, or null to skip saving
   */
  def doTag(businessDF: DataFrame, tagDF: DataFrame): DataFrame

  /**
   * Save the computed profile tags into the HBase user-profile table
   * tbl_profile (column family "user", row key column "userId").
   */
  def saveTag(modelDF: DataFrame): Unit = {
    // NOTE(review): zkHosts here is "bigdata-cdh01" while init()/rule metadata use
    // the FQDN "bigdata-cdh01.itcast.cn" — confirm both resolve to the same host.
    HBaseTools.write(
      modelDF, "bigdata-cdh01", "2181", "tbl_profile", "user", "userId"
    )
  }

  /** Release resources: stop the SparkSession if it was created. */
  def close(): Unit = {
    if (null != spark) {
      spark.stop()
    }
  }

  /**
   * Template method fixing the execution order of every tag model:
   * init -> getTagData -> getBusinessData -> doTag -> saveTag -> close.
   *
   * @param tagId id of the business tag to compute
   */
  def executeModel(tagId: Long): Unit = {
    init()
    try {
      // 1. Load and cache the tag definitions (used by both getBusinessData and doTag)
      val tagDF: DataFrame = getTagData(tagId)
      tagDF.persist(StorageLevel.MEMORY_AND_DISK)
      tagDF.count() // action to materialize the cache

      // 2. Load the business data described by the tag rule
      val businessDF: DataFrame = getBusinessData(tagDF)

      // 3. Compute the tags (subclass implementation)
      val modelDF: DataFrame = doTag(businessDF, tagDF)

      // 4. Persist the result; doTag may return null to signal "nothing to save"
      if (null != modelDF) saveTag(modelDF)
      tagDF.unpersist()
    } catch {
      // NonFatal lets truly fatal errors (OOM, interrupts) propagate;
      // log through the Logging facility instead of printStackTrace to stderr
      case NonFatal(e) => logError(s"Tag model execution failed for tagId=$tagId", e)
    } finally {
      close()
    }
  }
}
