package com.ctbri.manage.compute.scala.operator

import com.ctbri.manage.compute.util.CommonUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, Row, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

import java.lang.reflect.Field
import scala.reflect.ClassTag
import scala.util.control.NonFatal

/**
 * 操作Hive数据仓库工具类
 */
object HiveLoader {

  // Hive database every statement runs against; mutable so jobs can retarget
  // it before loading. NOTE(review): table/partition names are concatenated
  // straight into SQL below — callers must only pass trusted identifiers.
  var db: String = "mdm"
  protected final val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Logs an operational message at WARN level (used to trace SQL execution).
   *
   * @param message message to log
   */
  def logWarning(message: String): Unit = {
    logger.warn(message)
  }

  /**
   * Runs a query and registers the result as a cached temporary view.
   *
   * The `count()` triggers a Spark job purely for logging; caching BEFORE the
   * count (the original cached after) lets that job populate the cache so the
   * work is not repeated when the view is used later.
   *
   * @param session  active SparkSession
   * @param sql      query to execute
   * @param tmpTable name of the temporary view to create
   */
  def loadData(session: SparkSession, sql: String, tmpTable: String): Unit = {
    session.sql("use " + this.db)
    logger.warn("sql:" + sql)
    val df = session.sql(sql)
    df.cache()
    logger.warn("sql数据条数：" + df.count())
    df.createOrReplaceTempView(tmpTable)
  }

  /**
   * Loads the result of a query as an RDD of the given model class via
   * reflection (no fields excluded).
   *
   * @param session SparkSession
   * @param sql     query to execute
   * @param clazz   model class mapped onto each row
   * @return RDD of populated model instances
   */
  def load[T: ClassTag](session: SparkSession, sql: String, clazz: Class[T]): RDD[T] = {
    load(session, sql, clazz, null)
  }

  /**
   * Executes a query and maps every row onto a freshly constructed instance of
   * `clazz`, invoking the JavaBean setter ("set" + capitalized field name) for
   * each declared field, reading the row column with the lower-cased field name.
   *
   * The field "serialVersionUID" and any field listed in `excludeFields` are
   * skipped. A failure to set one field is logged and ignored so a single bad
   * column does not abort the whole job (original best-effort behavior).
   *
   * @param session       SparkSession
   * @param sql           query to execute
   * @param clazz         target model class (must expose a public no-arg constructor)
   * @param excludeFields field names to skip, or null to skip none
   * @return RDD of populated model instances
   */
  def load[T: ClassTag](session: SparkSession, sql: String, clazz: Class[T], excludeFields: Array[String]): RDD[T] = {
    session.sql("use " + this.db)
    val df = session.sql(sql)
    logWarning("执行sql：" + sql)
    // mapPartitions: java.lang.reflect.Field is not serializable, so the field
    // scan must run on the executor — but once per partition, not once per row
    // as the original did inside rdd.map.
    df.rdd.mapPartitions { rows =>
      val fields = getFields(clazz)
      rows.map { row =>
        // getDeclaredConstructor().newInstance() replaces the deprecated
        // Class.newInstance() (deprecated since Java 9), same behavior here.
        val instance = clazz.getDeclaredConstructor().newInstance()
        fields.foreach { field =>
          val name = field.getName
          if (name != "serialVersionUID" && (excludeFields == null || !excludeFields.contains(name))) {
            try {
              val setter = clazz.getDeclaredMethod("set" + CommonUtil.upperFirstLetter(name), field.getType)
              setter.invoke(instance, row.getAs(name.toLowerCase()))
            } catch {
              // NonFatal instead of Throwable: never swallow OOM/interrupts.
              case NonFatal(t) => logger.error(t.getMessage, t)
            }
          }
        }
        instance
      }
    }
  }

  /**
   * Loads a Hive table restricted to the given partitions as a typed RDD.
   *
   * @param session    SparkSession
   * @param table      table name
   * @param partitions partition name -> value pairs, ANDed into the WHERE clause
   * @param clazz      model class
   * @return RDD of populated model instances
   */
  def loadTable[T: ClassTag](session: SparkSession, table: String, partitions: Map[String, Int], clazz: Class[T]): RDD[T] = {
    val sql = "select * from " + table + whereClause(partitions)
    logWarning(sql)
    load(session, sql, clazz)
  }

  /**
   * Loads a whole Hive table (no partition filter) as a typed RDD.
   *
   * @param session SparkSession
   * @param table   table name
   * @param clazz   model class
   * @return RDD of populated model instances
   */
  def loadTable[T: ClassTag](session: SparkSession, table: String, clazz: Class[T]): RDD[T] = {
    load(session, "select * from " + table, clazz)
  }

  /**
   * Loads a Hive table restricted to the given partitions as an RDD of raw rows.
   *
   * @param session    SparkSession
   * @param table      table name
   * @param partitions partition name -> value pairs, ANDed into the WHERE clause
   * @return RDD of rows
   */
  def loadTable(session: SparkSession, table: String, partitions: Map[String, Int]): RDD[Row] = {
    session.sql("use " + this.db)
    val sql = "select * from " + table + whereClause(partitions)
    logWarning(sql)
    session.sql(sql).rdd
  }

  /**
   * Overwrites a (non-partitioned) Hive table with the given dataset.
   *
   * @param session SparkSession
   * @param table   table name
   * @param rdd     data to insert
   * @param clazz   model class describing the columns
   */
  def upload[T](session: SparkSession, table: String, rdd: RDD[T], clazz: Class[T]): Unit = {
    upload(session, table, rdd, clazz, isOverwrite = true, Array.empty[String])
  }

  /**
   * Inserts the dataset into a (non-partitioned) Hive table.
   *
   * FIX: the original emitted "insert  overwrite <table> select ..." without
   * the TABLE keyword, which is invalid HiveQL for overwrite inserts; the
   * statement now matches the partitioned variant ("insert overwrite table ...").
   *
   * @param session       SparkSession
   * @param table         table name
   * @param rdd           data to insert
   * @param clazz         model class describing the columns
   * @param isOverwrite   true to overwrite the table, false to append
   * @param excludeFields field names not written, or null to write all
   */
  def upload[T](session: SparkSession, table: String, rdd: RDD[T], clazz: Class[T], isOverwrite: Boolean, excludeFields: Array[String]): Unit = {
    session.sql("use " + this.db)
    session.createDataFrame(rdd, clazz).createOrReplaceTempView("cem_temp")
    val operate = if (isOverwrite) "overwrite" else "into"
    val sql = "insert " + operate + " table " + table + " select " +
      getFieldsFromClass(clazz, excludeFields) + " from cem_temp"
    session.sql(sql)
  }

  /**
   * Overwrites the given partition of a Hive table with the dataset.
   *
   * @param session    SparkSession
   * @param table      table name
   * @param partitions [partition name, partition value] pairs to insert into
   * @param rdd        data to insert
   * @param clazz      model class describing the columns
   */
  def upload[T](session: SparkSession, table: String, partitions: Map[String, Int], rdd: RDD[T], clazz: Class[T]): Unit = {
    upload(session, table, partitions, rdd, clazz, isOverwrite = true, Array.empty[String])
  }

  /** Same as above with an explicit overwrite/append choice. */
  def upload[T](session: SparkSession, table: String, partitions: Map[String, Int], rdd: RDD[T], clazz: Class[T], isOverWrite: Boolean): Unit = {
    upload(session, table, partitions, rdd, clazz, isOverWrite, Array.empty[String])
  }

  /** Same as the full variant below, using the default staging view "cgs_temp". */
  def upload[T](session: SparkSession, table: String, partitions: Map[String, Int], rdd: RDD[T], clazz: Class[T], isOverwrite: Boolean, excludeField: Array[String]): Unit = {
    upload(session, table, partitions, rdd, clazz, isOverwrite, excludeField, "cgs_temp")
  }

  /**
   * Inserts the dataset into the given partition of a Hive table via a staging
   * temporary view.
   *
   * @param session      SparkSession
   * @param table        Hive table
   * @param partitions   partition name -> value pairs
   * @param rdd          data to insert
   * @param clazz        model class describing the columns
   * @param isOverwrite  true to overwrite the partition, false to append
   * @param excludeField field names not written, or null to write all
   * @param tmpTable     staging temporary view name
   * @tparam T model type
   */
  def upload[T](session: SparkSession, table: String, partitions: Map[String, Int], rdd: RDD[T], clazz: Class[T],
                isOverwrite: Boolean, excludeField: Array[String], tmpTable: String): Unit = {
    session.sql("use " + this.db)
    // repartition(4) keeps the number of output files small; persist() avoids
    // recomputing the source RDD while Hive materializes the insert.
    val df = session.createDataFrame(rdd, clazz).repartition(4).persist()
    df.createOrReplaceTempView(tmpTable)
    val operate = if (isOverwrite) "overwrite" else "into"
    val sql = "insert " + operate + " table " + table + partitionClause(partitions) +
      " select " + getFieldsFromClass(clazz, excludeField) + " from " + tmpTable
    runTimed(session, table, sql)
  }

  /**
   * Runs `dataSql` and inserts its result into the given partition of a Hive table.
   *
   * @param session     SparkSession
   * @param table       table name
   * @param partitions  partition name -> value pairs (may be null or empty)
   * @param isOverwrite true to overwrite, false to append
   * @param dataSql     query producing the rows to insert
   * @param fields      comma-separated column list for the SELECT
   * @param partNum     number of output partitions (files)
   * @tparam T unused type parameter, kept for signature compatibility
   */
  def upload[T](session: SparkSession, table: String, partitions: Map[String, Int],
                isOverwrite: Boolean, dataSql: String, fields: String, partNum: Int): Unit = {
    session.sql("use " + this.db)
    logger.warn("dataSql:" + dataSql)
    upload(session, table, partitions, isOverwrite, session.sql(dataSql), fields, partNum)
  }

  /**
   * Inserts the given DataFrame into the given partition of a Hive table.
   *
   * @param session     SparkSession
   * @param table       table name
   * @param partitions  partition name -> value pairs (may be null or empty)
   * @param isOverwrite true to overwrite, false to append
   * @param df          rows to insert
   * @param fields      comma-separated column list for the SELECT
   * @param partNum     number of output partitions (files)
   * @tparam T unused type parameter, kept for signature compatibility
   */
  def upload[T](session: SparkSession, table: String, partitions: Map[String, Int],
                isOverwrite: Boolean, df: Dataset[Row], fields: String, partNum: Int): Unit = {
    session.sql("use " + this.db)
    df.repartition(partNum).cache()
      .createOrReplaceTempView("temp_hiveloader_partitions")
    val operate = if (isOverwrite) "overwrite" else "into"
    val sql = "insert " + operate + " table " + table + partitionClause(partitions) +
      " select " + fields + " from temp_hiveloader_partitions"
    runTimed(session, table, sql)
  }

  /**
   * Collects the declared fields of `clazz` and of all its superclasses below
   * Object. A null guard on the superclass walk prevents the NPE the original
   * hit when the chain ended at null (e.g. for interfaces).
   *
   * @param clazz class to inspect
   * @tparam T model type
   * @return declared fields of the class and its ancestors
   */
  def getFields[T](clazz: Class[T]): Array[Field] = {
    Iterator.iterate[Class[_]](clazz)(_.getSuperclass)
      .takeWhile(c => c != null && c != classOf[Object])
      .flatMap(_.getDeclaredFields)
      .toArray
  }

  /**
   * Returns the class's field names, lower-cased and sorted alphabetically to
   * match Hive's column order, joined with commas. "serialVersionUID" and any
   * name in `excludeFields` are omitted.
   *
   * @param clazz         class to inspect
   * @param excludeFields field names to omit, or null to omit none
   * @return comma-separated lower-case column list
   */
  def getFieldsFromClass[T](clazz: Class[T], excludeFields: Array[String]): String = {
    clazz.getDeclaredFields
      .map(_.getName)
      .filter(n => n != "serialVersionUID" && (excludeFields == null || !excludeFields.contains(n)))
      .map(_.toLowerCase())
      .sorted
      .mkString(",")
  }

  /** Renders " where k1=v1 and k2=v2" for the partitions, or "" when null/empty. */
  private def whereClause(partitions: Map[String, Int]): String =
    if (partitions == null || partitions.isEmpty) ""
    else partitions.map { case (k, v) => k + "=" + v }.mkString(" where ", " and ", "")

  /** Renders " partition( k1=v1,k2=v2)" for the partitions, or "" when null/empty. */
  private def partitionClause(partitions: Map[String, Int]): String =
    if (partitions == null || partitions.isEmpty) ""
    else partitions.map { case (k, v) => k + "=" + v }.mkString(" partition( ", ",", ")")

  /** Executes an insert statement, logging the SQL and the elapsed minutes. */
  private def runTimed(session: SparkSession, table: String, sql: String): Unit = {
    val starttime = System.currentTimeMillis()
    logWarning("插入数据 sql:" + sql)
    session.sql(sql)
    val endtime = System.currentTimeMillis()
    logWarning("向表" + table + "插入数据，用时分钟数为：" + CommonUtil.computeDurMin(starttime, endtime))
  }

}
