package com.feidee.fd.sml.algorithm.component.input

import java.sql.{Connection, ResultSet}

import com.feidee.fd.sml.algorithm.component.{AbstractComponent, BasicParam}
import com.feidee.fdcommon.configuration.CustomConfiguration
import com.feidee.fdcommon.constant.CommonConstant
import com.feidee.fdhadoop.hive.HiveUtil
import org.apache.spark.sql.DataFrame

import scala.util.Random
/**
  * @Author caokaizhi, songhaicheng
  * @Date 2018-08-14 20:57
  */
/**
  * Parameter bag for the [[HiveInput]] component.
  *
  * Carries the SQL to run, the execution engine selector and the environment
  * flag on top of the common fields inherited from [[BasicParam]].
  */
case class HiveInputParam(
                           input_pt: String,
                           output_pt: String,
                           hive_table: String,
                           flow_time: String,
                           hiveSql: String,
                           // Query engine: "spark" -> SparkSQL, "hive" -> query Hive over JDBC. Defaults to "spark".
                           execType: String,
                           env: String
                         ) extends BasicParam {

  // No-arg constructor; every field is null except execType, which defaults to "spark".
  def this() = this(null, null, null, null, null, "spark", null)

  override def verify(): Unit = {
    super.verify()
    // hiveSql is mandatory; hive_table must NOT be set for this component.
    require(tool.isNotNull(hiveSql), "param hiveSql can't be null")
    require(!tool.isNotNull(hive_table), "param hiveTable is not allowed with HiveInput Component")
    val allowedEngines = Seq("spark", "hive")
    require(allowedEngines.contains(execType),
      s"param execType expects (\"spark\", \"hive\"), but has \"$execType\"")
  }

  /** Extends the inherited map with this component's own parameters. */
  override def toMap: Map[String, Any] =
    super.toMap ++ Map(
      "hiveSql" -> hiveSql,
      "execType" -> execType,
      "env" -> env
    )
}


class HiveInput extends AbstractComponent[HiveInputParam] {

  /**
    * Executes the configured SQL through SparkSQL.
    *
    * @param param component parameters; only `hiveSql` is used here
    * @return the DataFrame produced by the statement
    */
  override def loadData(param: HiveInputParam): DataFrame = {
    spark.sql(param.hiveSql)
  }

  /**
    * Query Hive over a JDBC connection (kept for reference, currently unused).
    *
    * NOTE(review): this commented-out path embeds hard-coded credentials for
    * both environments — move them to external/secret configuration before
    * ever re-enabling it.
    * @return
    */
  /*def openConn(param: HiveInputParam): Connection = {
    CustomConfiguration.setString(CommonConstant.ENV_KEY, param.env)
    // 判断是正式环境发起的请求还是其它环境的
    val connProp = if (CommonConstant.ENV_LIVE.equals(param.env)) {
      ("org.apache.hive.jdbc.HiveDriver",
        "jdbc:hive2://" +
          "zk-common1.suishoushuju.internal:2181," +
          "zk-common2.suishoushuju.internal:2181," +
          "zk-common3.suishoushuju.internal:2181," +
          "zk-common4.suishoushuju.internal:2181," +
          "zk-common5.suishoushuju.internal:2181/" +
          ";serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2",
        "fdbd_rec",
        "vbg56zxw")
    } else {
      ("org.apache.hive.jdbc.HiveDriver",
        "jdbc:hive2://sz-7-centos186:2181,sz-7-centos187:2181,sz-7-centos188:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2",
        "hive",
        "hive")
    }
    HiveUtil.getConn(connProp._1, connProp._2, connProp._3, connProp._4)
  }*/

  /**
    * Runs a multi-statement SQL script, materializes the result of the last
    * SELECT into a temp table, and writes it out as parquet to `output_pt`.
    *
    * @param param component parameters (`hiveSql` script, `output_pt` target path)
    */
  def writeParquetToHDFS(param: HiveInputParam): Unit = {
    // Split the script into individual statements. Each fragment is trimmed so
    // surrounding whitespace does not break DDL (e.g. CREATE TABLE), and empty
    // fragments are dropped — previously a trailing or duplicated semicolon
    // produced an empty string that made spark.sql() throw a parse error.
    val statements = param.hiveSql.split(";").map(_.trim).filter(_.nonEmpty)
    val indexed = statements.zipWithIndex.map { case (sql, i) => (sql, i + 1) }
    val queries = indexed.filter { case (sql, _) => sql.toUpperCase.startsWith("SELECT") }
    require(queries.nonEmpty, "解析出的 SQL 语句未发现查询数据用的语句（SELECT/select 开头），请检查下 hiveSql 参数")
    // The last SELECT statement is the one that determines the final result set.
    val lastQueryIndex = queries.last._2
    val total = indexed.last._2
    // Execute every statement in order.
    for ((sql, idx) <- indexed) {
      if (idx != lastQueryIndex) {
        logInfo(s"executing sql in part($idx/$total): $sql")
        spark.sql(sql)
      } else {
        // Reached the statement producing the final result.
        logInfo(s"fetching final data in part($idx/$total): $sql")
        // Persist the result into a uniquely-named temp table, then re-save it
        // as parquet at the requested HDFS location.
        val tmpHiveTableName = s"temp.sml_hiveinput_${System.currentTimeMillis()}_${Random.nextInt(100)}"
        spark.sql(s"create table $tmpHiveTableName as $sql")
        try {
          spark.table(tmpHiveTableName).write.save(param.output_pt)
        } finally {
          // Always drop the temp table (and its backing HDFS data), even when
          // the parquet write fails — previously a failed write leaked it.
          spark.sql(s"drop table if exists $tmpHiveTableName")
        }
      }
    }
  }

  /**
    * Entry point: parses and validates the parameter string, then dispatches
    * on the selected execution engine.
    */
  override def apply(paramStr: String): Unit = {
    val param = parseParam(paramStr)
    param.verify()
    // Use logInfo for consistency with the rest of this class (was println).
    logInfo(s"using ${param.execType} to execute sql: ${param.hiveSql}")
    param.execType match {
      case "spark" =>
        val inputData = loadData(param)
        val outputDF = process(param, inputData)
        outputData(outputDF, param)
      case "hive" => writeParquetToHDFS(param)
    }
  }

}

/** Companion object exposing a shared [[HiveInput]] instance and a CLI entry point. */
object HiveInput {
  val component = new HiveInput()

  /** Delegates to the shared component instance. */
  def apply(paramStr: String): Unit = component(paramStr)

  /** CLI entry point; the first argument is the serialized parameter string. */
  def main(args: Array[String]): Unit = HiveInput(args(0))
}
