package base

import java.util.Properties

import bean.HBaseMeta
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}


/*
Reusable base logic shared by every tag-model job in the project
(template-method pattern: subclasses supply the job name, tag id and
matching logic; everything else is implemented here).
 */
/**
 * Template for tag-computation jobs (template-method pattern).
 *
 * Concrete models only provide the Spark app name, the four-level tag id and
 * the model-specific matching logic ([[getNewTag]]); all shared plumbing —
 * reading the MySQL rule table, reading/writing HBase and merging tags — is
 * implemented here and driven by [[exec]].
 */
trait BaseModel {

  // ---- members each concrete model must override ---------------------------

  /** Spark application name for this job. */
  def setAppName: String

  /** Id of the four-level tag whose rule row describes the HBase source. */
  def setFourId: String

  /**
   * Compute the model-specific tags from the five-level rule rows and the
   * HBase source data. Implemented by each subclass.
   */
  def getNewTag(spark: SparkSession, fiveTagDF: DataFrame, HBaseDF: DataFrame): DataFrame

  // ---- shared configuration / session ---------------------------------------
  // All of these are LAZY on purpose: `setAppName` is abstract, and an eager
  // val in a trait body runs before the subclass constructor, so the original
  // eager initialization invoked the abstract member during trait init
  // (classic trait-initialization-order pitfall). Laziness defers evaluation
  // until first use inside exec(), after the subclass is fully constructed.

  // application.conf settings for the MySQL rule table
  private lazy val conf: Config = ConfigFactory.load()
  private lazy val url: String = conf.getString("jdbc.mysql.url")
  private lazy val tableName: String = conf.getString("jdbc.mysql.tableName")

  // Single SparkSession for the whole job; log level set once at creation.
  private lazy val spark: SparkSession = {
    val session = SparkSession.builder().appName(setAppName).master("local[*]").getOrCreate()
    session.sparkContext.setLogLevel("WARN")
    session
  }

  // Encoders for Dataset.map in getFourTag (lazy val is a stable identifier,
  // so importing from it is legal) and SQL helpers (udf / when) for saveAllTag.
  import spark.implicits._
  import org.apache.spark.sql.functions._

  /** Runs the full pipeline in order: read rules -> read HBase -> tag -> save. */
  def exec() = {
    // MySQL table holding the tag rules
    val mysqlCoon: DataFrame = CoonMysql()
    // four-level rule, parsed into an HBaseMeta describing the source table
    val hBaseMeta: HBaseMeta = getFourTag(mysqlCoon)
    // five-level tag rows (rule + id) consumed by the model-specific matcher
    val fiveDF: DataFrame = getFiveTagDF(mysqlCoon)
    // source data from HBase
    val HBaseDF: DataFrame = getHBaseDF(hBaseMeta)
    // model-specific tag computation
    val newJobTagDF: DataFrame = getNewTag(spark, fiveDF, HBaseDF)
    // merge with historic tags, de-duplicate, write back to HBase
    saveAllTag(hBaseMeta, newJobTagDF)
  }

  /** Opens the MySQL rule table as a DataFrame. */
  def CoonMysql() = {
    spark.read.jdbc(url, tableName, new Properties)
  }

  /**
   * Reads the four-level tag rule (row with id == setFourId), parses its
   * `k1=v1##k2=v2##...` rule string and wraps it in an [[HBaseMeta]].
   *
   * Fails with a descriptive message when the id is absent from the rule
   * table (the original `collectAsList().get(0)` threw an opaque
   * IndexOutOfBoundsException in that case).
   */
  def getFourTag(mysqlCoon: DataFrame) = {
    val fourDS: Dataset[Row] = mysqlCoon.select('rule).where("id=" + setFourId)
    val parsedRules: Array[Array[(String, String)]] = fourDS.map { row =>
      // rule format: "key=value##key=value##..."
      row.getAs[Any]("rule").toString
        .split("##")
        .map { entry =>
          val kv = entry.split("=")
          (kv(0), kv(1))
        }
    }.collect()
    require(parsedRules.nonEmpty, s"no four-level tag rule found for id=$setFourId")
    getHBaseMeta(parsedRules.head.toMap)
  }

  /** Five-level tag rows (rule + id) belonging to the four-level tag (pid match). */
  def getFiveTagDF(mysqlCoon: DataFrame) = {
    mysqlCoon.select("rule", "id").where("pid=" + setFourId).toDF()
  }

  /** Reads the HBase source table described by the four-level rule. */
  def getHBaseDF(hbaseMeta: HBaseMeta) = {
    spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, hbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, hbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, hbaseMeta.hbaseTable)
      .option(HBaseMeta.FAMILY, hbaseMeta.family)
      .option(HBaseMeta.SELECTFIELDS, hbaseMeta.selectFields)
      .load()
  }

  /**
   * Merges the freshly computed tags with the historic tags already stored in
   * HBase ("test" table, "detail" family) and writes the result back.
   *
   * Uses a FULL OUTER join so that users present only in the new batch (or
   * only in history) are kept — the original inner join silently dropped both
   * groups, even though the `when(...isNotNull)` handling clearly intended
   * them to survive.
   */
  def saveAllTag(HbaseMeta: HBaseMeta, newJobTags: DataFrame) = {
    // Merge the two comma-separated tag-id lists. Either side may be null
    // (outer join) or empty; de-duplicates because the job may be re-run.
    // The original version checked "both empty" AFTER the individual empty
    // checks, making that branch unreachable; this ordering fixes it.
    val getAllTags = udf((historyTagId: String, newTagId: String) => {
      val history = if (historyTagId == null) "" else historyTagId
      val fresh = if (newTagId == null) "" else newTagId
      if (history.isEmpty) fresh // also covers "both empty" -> ""
      else if (fresh.isEmpty) history
      else (history + "," + fresh).split(",").distinct.mkString(",")
    })

    // a) historic tag results written by previous runs
    val historyTag: DataFrame = spark.read.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, HbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, HbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .load()

    // b) append the new tags to history: FULL OUTER keeps rows from both sides
    val JoinTags: DataFrame = historyTag.join(
      newJobTags, historyTag("userId") === newJobTags("userId"), "full")

    val updateTags: DataFrame = JoinTags.select(
      // take the non-null userId from whichever side has the row
      // (when/otherwise instead of a chained .when with no default, which
      // could evaluate to null)
      when(historyTag.col("userId").isNotNull, historyTag.col("userId"))
        .otherwise(newJobTags.col("userId"))
        .as("userId"),
      // merge + de-duplicate the tag lists via the UDF above
      getAllTags(historyTag.col("tagsId"), newJobTags.col("tagsId")).as("tagsId")
    )

    // c) write the merged, de-duplicated tags back to HBase
    updateTags.write.format("tools.HBaseDataSource")
      .option(HBaseMeta.ZKHOSTS, HbaseMeta.zkHosts)
      .option(HBaseMeta.ZKPORT, HbaseMeta.zkPort)
      .option(HBaseMeta.HBASETABLE, "test")
      .option(HBaseMeta.FAMILY, "detail")
      .option(HBaseMeta.SELECTFIELDS, "userId,tagsId")
      .save()
  }

  /** Builds an [[HBaseMeta]] from the parsed rule map; missing keys default to "". */
  def getHBaseMeta(fourMap: Map[String, String]): HBaseMeta = {
    HBaseMeta(
      fourMap.getOrElse(HBaseMeta.INTYPE, ""),
      fourMap.getOrElse(HBaseMeta.ZKHOSTS, ""),
      fourMap.getOrElse(HBaseMeta.ZKPORT, ""),
      fourMap.getOrElse(HBaseMeta.HBASETABLE, ""),
      fourMap.getOrElse(HBaseMeta.FAMILY, ""),
      fourMap.getOrElse(HBaseMeta.SELECTFIELDS, ""),
      fourMap.getOrElse(HBaseMeta.ROWKEY, "")
    )
  }
}
