package cn.getech.data.development.job

import java.io.ByteArrayOutputStream
import java.security.KeyFactory
import java.security.interfaces.RSAPublicKey
import java.security.spec.{PKCS8EncodedKeySpec, X509EncodedKeySpec}
import java.util
import java.util.{Base64, Map, Properties}

import cn.getech.data.development.job.util.{JdbcUtil, ParseDateTime, PropertiesUtils, RangerRestUtil, SapConn, SapConnUtils}
import com.sap.conn.jco._
import com.sap.conn.jco.rt.DefaultTable
import javax.crypto.Cipher
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks.{break, breakable}

object SapData2Hive {
  private val log = LoggerFactory.getLogger(this.getClass)
  // Shared job configuration loaded from the classpath.
  private val properties: Properties = PropertiesUtils.getProperties("common.properties")
  // Public key from config; broadcast to executors alongside the job config (see main).
  private val publicKey = PropertiesUtils.getValue(properties,"inputPublicKey")
  // Apache Ranger REST endpoint used for table-permission checks.
  private val rangerUrl = PropertiesUtils.getValue(properties,"rangerUrl")
  private val rangerUtil: RangerRestUtil = new RangerRestUtil(rangerUrl)
  /**
   * Entry point: reads the job configuration JSON from HDFS, pulls data from
   * SAP via the configured RFC function and writes it into Hive tables.
   *
   * args(0) = job id (used to locate the config JSON on HDFS)
   * args(1) = run time, "yyyy-MM-dd" or "yyyy-MM-dd HH:mm:ss" form
   */
  def main(args: Array[String]): Unit = {
    // Fixed: args(1) is read below, so two arguments are required (was `< 1`,
    // which let a single-argument invocation crash with ArrayIndexOutOfBounds).
    if(args.length < 2){
      log.error("2 args are at least needed in this job...")
      System.exit(1)
    }

    val jobid = args(0)
    val timehour = args(1)

    val hdfsPath = "hdfs:///bdp/jobconfig/1/jobid_" + jobid + ".json"
    val spark: SparkSession = getSparkSession()

    // Keep only the date part when a "date time" value is passed.
    val formatTime = if (timehour.contains(" ")) timehour.split(" ")(0) else timehour

    //spark.sparkContext.setCheckpointDir(s"hdfs:///spark_chechpoint/${this.getClass.getSimpleName}/${formatTime}/")
    val rows: Array[Row] = readConfig(spark, hdfsPath)

    var broadcast: Broadcast[(java.util.Map[String, Nothing],String,String)] = null
    var map: java.util.Map[String, Nothing] = null
    try{
      // First (and only) config row, converted to a field-name -> value map.
      map = rows(0).getValuesMap(rows(0).schema.fieldNames).asJava
      broadcast = spark.sparkContext.broadcast((map,timehour,publicKey))
    }catch{
      case e: Exception => log.error("获取sap配置信息错误...")
    }

    // Fixed: previously only logged and fell straight into an NPE in
    // initializeDataByName; abort cleanly instead.
    if(null == map || null == broadcast){
      log.error("map,broadcast为空...")
      spark.stop()
      System.exit(1)
    }

    // (data map, schema map) keyed by SAP table name.
    val tuple: (java.util.Map[String, ListBuffer[String]], java.util.Map[String, ListBuffer[String]]) = initializeDataByName(broadcast,spark)

    if(!tuple._1.isEmpty && !tuple._2.isEmpty){
      saveSapData2HiveTable(timehour,spark,tuple,broadcast)
    }else{
      log.error(" -- sap获取数据为 0 -- ")
    }

    spark.stop()
  }

  /**
   * Check whether the configured Ranger user may write to the output tables.
   * @param map job configuration map (JSON-derived, hence the loose typing)
   * @param rangerUrl Ranger endpoint (unused here; queries go through the
   *                  object-level rangerUtil, which was built from the same URL)
   * @return mutable map of table name -> whether the user has permission
   */
  def checkUserPolicy(map: Map[String, Nothing], rangerUrl: String): mutable.Map[String, Boolean] = {

    // Fall back to "hdfs" when no Ranger user is configured.
    // NOTE(review): map.get is called twice; assumes the value is stable between calls.
    val ranger_user: String = if(null != map.get("rangerUserName")){
      map.get("rangerUserName")
    }else{
      "hdfs"
    }
    // Output tables and database from the job config; unchecked cast — throws
    // ClassCastException if the JSON shape differs from expectations.
    val tableNames: Seq[String] = map.get("output_table_name").asInstanceOf[Seq[String]]
    val output_db_name: String = map.get("output_db_name")

    // Single Ranger query covering all output tables.
    val tableNameAndBoolean: mutable.Map[String, Boolean] = rangerUtil.queryPermissionByUserAndTables(ranger_user,output_db_name,tableNames)

    tableNameAndBoolean
  }



  /**
   * Convert the SAP data/schema maps into Spark DataFrames, one per output table.
   *
   * @param sparkSession active SparkSession
   * @param tuple (table name -> data rows, table name -> "fieldName&fieldType" schema entries)
   * @param broadcast broadcast job configuration map
   * @return map of output table name (lower case) -> DataFrame
   */
  def sapTables2DataFrame(sparkSession: SparkSession, tuple: (java.util.Map[String, ListBuffer[String]], java.util.Map[String, ListBuffer[String]]), broadcast: Broadcast[java.util.Map[String, Nothing]]):mutable.Map[String, DataFrame] = {
    val map: java.util.Map[String, Nothing] = broadcast.value
    // Output tables from the job config (unchecked cast from JSON).
    var outputTableSeq: Seq[String] = Seq()
    try {
      outputTableSeq = map.get("output_table_name").asInstanceOf[Seq[String]]
    } catch {
      case e: Exception => log.error(e.getMessage)
    }

    // SAP data rows per table.
    val sapDataMap: java.util.Map[String, ListBuffer[String]] = tuple._1
    // SAP table names and their schemas.
    val sapSchemaMap: java.util.Map[String, ListBuffer[String]] = tuple._2
    // Result accumulator: output table name -> DataFrame.
    val tableSchemMap: mutable.Map[String, DataFrame] = mutable.Map[String, DataFrame]()

    // Build a StructType per SAP table from the "fieldName&fieldType" entries.
    val tableAndStructType: mutable.Map[String, StructType] = mutable.Map[String, StructType]()
    val schemaIter = sapSchemaMap.entrySet().iterator()
    while (schemaIter.hasNext) {
      val entry = schemaIter.next()
      val tableName: String = entry.getKey
      val fields: ListBuffer[StructField] = ListBuffer[StructField]()
      for (elem <- entry.getValue) {
        val splits: Array[String] = elem.split("&")
        val fieldName: String = splits(0)
        // All SAP field types are represented as strings in Hive.
        // Fixed: an unknown type string previously threw scala.MatchError
        // (non-exhaustive match); it now defaults to StringType like the rest.
        val atomicType = splits(1) match {
          case "NUM" | "DATE" | "CHAR" | "BCD" => StringType
          case _ => StringType
        }
        fields += StructField(fieldName.toLowerCase, atomicType)
      }
      tableAndStructType.put(tableName.toLowerCase, new StructType(fields.toArray))
    }

    if (outputTableSeq.nonEmpty) {
      // Explicit output tables configured: match each one to a SAP table by suffix.
      for (outputTable <- outputTableSeq) {
        // Fixed: reset per iteration — the matched name used to persist across
        // loop iterations, so an unmatched output table silently reused the
        // previous table's data.
        var realTableNameInOutput: String = ""
        val sapTableIter = sapSchemaMap.keySet().iterator()
        while (sapTableIter.hasNext) {
          val sapTableName: String = sapTableIter.next()
          if (outputTable.toLowerCase.endsWith(sapTableName.toLowerCase)) {
            realTableNameInOutput = sapTableName.toLowerCase
            log.error(s"--- outputTableName: ${outputTable} -- realTableNameInOutput: ${realTableNameInOutput} ---")
          }
        }

        // Unmatched tables fall through with an empty row list and empty schema.
        val datas: ListBuffer[String] = sapDataMap.getOrDefault(realTableNameInOutput, new ListBuffer[String])
        log.error(datas.toList.toString())
        val dataRdd: RDD[String] = sparkSession.sparkContext.parallelize(datas.toSeq)
        val structType: StructType = tableAndStructType.getOrElse(realTableNameInOutput, new StructType())
        // Drop null/empty rows before parsing.
        val notNullRDD: RDD[String] = dataRdd.filter(str => null != str && "" != str)

        // NOTE(review): rows are split on "," here, while initializeDataByName
        // joins field values with "&&" — confirm the expected separator before
        // re-enabling this (currently unused) path.
        val rowRDD: RDD[Row] = notNullRDD.map(_.split(",")).map(Row.fromSeq(_))
        log.error(s" --- sapTableName: ${outputTable} ---")
        // Debug output: collects the whole RDD to the driver — remove for large tables.
        rowRDD.collect().map(println(_))
        val frame: DataFrame = sparkSession.createDataFrame(rowRDD, structType)
        tableSchemMap.put(outputTable.toLowerCase(), frame)
      }
    } else {
      // No explicit output tables configured: convert every fetched SAP table.
      val allTableIter = sapDataMap.entrySet().iterator()
      while (allTableIter.hasNext) {
        val entry = allTableIter.next()
        val tableName: String = entry.getKey
        val tableStruct: StructType = tableAndStructType.get(tableName.toLowerCase()).get
        val strRDD: RDD[String] = sparkSession.sparkContext.parallelize(entry.getValue.toSeq)
        // Drop null/empty rows before parsing.
        val notNullRDD: RDD[String] = strRDD.filter(str => null != str && "" != str)
        val rowRDD: RDD[Row] = notNullRDD.map(_.split(",")).map(Row.fromSeq(_))
        tableSchemMap.put(tableName.toLowerCase, sparkSession.createDataFrame(rowRDD, tableStruct))
      }
    }
    for (elem <- tableSchemMap.keySet) {
      log.error(s" -- elem : ${elem} -- ")
    }
    tableSchemMap
  }

  /**
   * Build (or reuse) the job's SparkSession with Hive support enabled.
   * The RPC message size is raised to accommodate large broadcast payloads.
   * @return SparkSession
   */
  def getSparkSession():SparkSession = {
    val builder = SparkSession
      .builder()
      .appName("SapData2Hive")
      .config("spark.rpc.message.maxSize", 1024)
      .enableHiveSupport()
    builder.getOrCreate()
  }

  /**
   * Read the job configuration JSON from HDFS.
   * @param spark sparkSession
   * @param path  JSON file path on HDFS
   * @return collected configuration rows (expected to contain a single row)
   */
  def readConfig(spark: SparkSession,path:String):Array[Row]={
    log.error(s" --- sap_jsonfile_path:${path} --- ")
    spark.read.json(path).collect()
  }

  /**
   * Fetch SAP data by executing the configured RFC function.
   *
   * Builds the SAP connection from the broadcast config, sets the RFC input
   * parameters (with date placeholders in the values replaced by the run
   * time), executes the function and flattens the result into two maps:
   *  - data:   table name -> rows, each row a "&&"-joined field-value string
   *  - schema: table name -> "fieldName&fieldType" entries in column order
   *
   * @param broadcast broadcast of (config map, run time, public key)
   * @param sparkSession sparkSession (currently unused here — TODO confirm it can be dropped)
   * @return (data map, schema map), both keyed by lower-cased table name
   */
  def initializeDataByName(broadcast: Broadcast[(java.util.Map[String, Nothing],String,String)],sparkSession: SparkSession): (java.util.Map[String,ListBuffer[String]],java.util.Map[String,ListBuffer[String]]) = {

    val map: java.util.Map[String, Nothing] = broadcast.value._1
    val time: String = broadcast.value._2

    // Whether loop-based retrieval is enabled (defaults to 1 on any read failure).
    // NOTE(review): read but never used below.
    val is_open_lood_access = try {
      map.get("is_open_lood_access").asInstanceOf[Int]
    }catch {
      case exception: Exception => 1
    }
    // Loop retrieval: start marker. NOTE(review): read but never used below.
    val begin_GDA = try {
      map.get("begin_GDA").asInstanceOf[String]
    }catch {
      case exception: Exception => ""
    }

    // Loop retrieval: end marker. NOTE(review): read but never used below.
    val end_GDA = try {
      map.get("end_GDA").asInstanceOf[String]
    }catch {
      case exception: Exception => ""
    }
    // Loop retrieval: name of the stop-flag field in the SAP result.
    val stop_sign = try {
      map.get("stop_sign").asInstanceOf[String]
    }catch {
      case exception: Exception => ""
    }
    // Expected value of the stop flag marking the last record.
    val stop_sign_val = try {
      map.get("stop_sign_val").asInstanceOf[String]
    }catch {
      case exception: Exception =>
        log.error("停止标识的值为空...")
        ""
    }

    // SAP connection settings built from the config.
    val conn: SapConn = getSapConn(map)
    // RFC function name (upper-cased as SAP expects).
    val rfcFunc: String = map.get("rfcFunc").asInstanceOf[String].toUpperCase

    // Input parameter names Seq[String].
    val inputParamer: Seq[String] = map.get("inputFieldNames").asInstanceOf[Seq[String]]

    // Input parameter values (may contain date placeholders).
    val inputs: Seq[String] = map.get("inputFieldValues").asInstanceOf[Seq[String]]

    // Name of the RFC output table parameter.
    var outputTable: String = ""
    try{
      outputTable = map.get("outputTableName").asInstanceOf[String]

    }catch {
      case e: Exception => log.error(e.getMessage)
    }

    // Output table field names.
    var tableParam: Seq[String] = Seq()
    try{
      tableParam = map.get("outputFieldNames").asInstanceOf[Seq[String]]
    }catch {
      case e: Exception => log.error(e.getMessage)
    }
    // Output table field data types.
    var tableParamType: Seq[String] = Seq()
    try{
      tableParamType = map.get("outputFieldDataTypes").asInstanceOf[Seq[String]]
    }catch {
      case e: Exception => log.error(e.getMessage)
    }
    // Preset supplementary values. NOTE(review): read but never used below.
    var tableParamValues: Seq[String] = Seq()
    try{
      tableParamValues = map.get("outputFieldValues").asInstanceOf[Seq[String]]
    }catch {
      case e: Exception => log.error(e.getMessage)
    }

    log.error(s"输入参数： -- rfcFunction:${rfcFunc} -- inputParam:${inputParamer} -- inputValue:${inputs} -- outputTable:${outputTable} -- tableParam:${tableParam}" +
      s" -- tableParamType:${tableParamType} --")

    // Field names and field types must pair up one-to-one.
    if(tableParam.size!= tableParamType.size){
      throw new RuntimeException("表输出参数和表输出参数类型个数不一致。。。")
    }

    // Verify connectivity before looking up and calling the function.
    val  dest: JCoDestination = SapConnUtils.getSapConn(conn)
    dest.ping
    log.error(" - - - - 开始调用sap数据接口 - - - -")
    // Look up the RFC function by name in the destination's repository.
    val  function: JCoFunction= dest.getRepository.getFunction(rfcFunc.toUpperCase());
    log.info(s" ---- function.toXML:${function.toXML} ---- ")
    var importP: JCoParameterList = function.getImportParameterList

    // Set the input parameters, replacing date placeholders with the run time.
    if(importP != null){
      for(o <- 0 until inputParamer.size){
        val timeStr: String = ParseDateTime.replaceDateTime(inputs(o),time)
        try{
          importP.setValue(inputParamer(o).toUpperCase,timeStr)
        }catch {
          case e:Exception =>
            log.error(e.getMessage)
        }
        log.error(s" --- 输入参数设置 inputParamer: ${inputParamer(o).toUpperCase} , inputParamValue: ${inputs(o)} , 转换后的: ${timeStr} --- ")
      }
    }else{
      // No import parameter list: write the inputs into the table parameters instead.
      importP = function.getTableParameterList
      val iterator: java.util.Iterator[JCoField] = importP.iterator()
      while(iterator.hasNext){
        val field: JCoField = iterator.next()
        val responseTable: JCoTable = field.getTable
        responseTable.appendRow()
        for(o <- 0 until inputParamer.size){

          val timeStr: String = ParseDateTime.replaceDateTime(inputs(o),time)
          log.error(s" --- 输入字段：${inputParamer(o)} , 实际输入参数： ${inputs(o)} , 转换后的输入参数： ${timeStr} --- ")
          responseTable.setValue(inputParamer(o).toUpperCase,timeStr)
        }
      }
    }

    val  tableParameterList: JCoParameterList = function.getTableParameterList

    // The output table may live in the table parameter list or the export list.
    val jCoTables: JCoTable = if(null != tableParameterList){
      function.getTableParameterList.getTable(outputTable)
    }else{
      function.getExportParameterList.getTable(outputTable)
    }
    // Execute the RFC function against the destination.
    function.execute(dest)

    log.error(" - - - - 开始获取sap数据 - - - - ")

    // Data rows per table.
    val sapTableDataMap: Map[String,ListBuffer[String]] = new java.util.HashMap[String,ListBuffer[String]]()
    // Schema entries per table.
    val sapTableSchemaMap: Map[String,ListBuffer[String]] = new java.util.HashMap[String,ListBuffer[String]]()

    // Branch on whether the result is a two-level (nested) structure.
    if(checkIsDouble(jCoTables)){
      // Nested structure — walk the child tables row by row.
      if(null != jCoTables){
        // Number of rows at the outer level.
        val rows: Int = jCoTables.getNumRows
        for(row <- 0 until rows) {
          breakable {

            jCoTables.setRow(row)
            // Field count at the current row pointer.
            val rowFieldCounts: Int = jCoTables.getRecordMetaData.getFieldCount
            // Walk the second-level structure.
            for (rowFieldCount <- 0 until rowFieldCounts) {
              var table: AnyRef = null
              // Child structure name.
              val childFieldName: String = jCoTables.getMetaData.getName(rowFieldCount).toLowerCase()
              //  log.error(s"childFieldName:${childFieldName}")
              // getTable throws when the field is not a table; a null `table`
              // afterwards means "scalar field".
              try {
                table = jCoTables.getTable(jCoTables.getMetaData.getName(rowFieldCount))
              } catch {
                case e: Exception => log.debug(e.getMessage)
              }

              // Non-null means the field is itself a table; cast to DefaultTable.
              if (null != table) {
                val defaultTable: DefaultTable = table.asInstanceOf[DefaultTable]
                val tableRows: Int = defaultTable.getNumRows

                // Empty child table: nothing to store.
                if (0 == tableRows) {

                } else {

                  val childSchemaList: ListBuffer[String] = sapTableSchemaMap.getOrDefault(childFieldName, new ListBuffer[String])
                  val childDataList: ListBuffer[String] = sapTableDataMap.getOrDefault(childFieldName, new ListBuffer[String])
                  for (tableRow <- 0 until tableRows) {
                    defaultTable.setRow(tableRow)
                    val dataSb = new mutable.StringBuilder()
                    val fieldCounts: Int = defaultTable.getMetaData.getFieldCount
                    for (fieldCount <- 0 until fieldCounts) {
                      breakable {
                        var value: AnyRef = defaultTable.getValue(fieldCount)
                        if ("" == value || null == value) {
                          value = "null"
                        }
                        var valueStr: String = ""
                        val fieldNameAndTypeSb = new mutable.StringBuilder()
                        // Column name.
                        val fieldName: String = defaultTable.getMetaData.getName(fieldCount).toLowerCase()
                        val typeString: String = defaultTable.getMetaData.getTypeAsString(fieldCount)

                        fieldNameAndTypeSb.append(fieldName).append("&").append(typeString)
                        // log.error(s"fieldNameAndTypeSb:${fieldNameAndTypeSb}")
                        // Record each column's "name&type" schema entry only once.
                        if (!childSchemaList.contains(fieldNameAndTypeSb.toString())) {
                          childSchemaList += fieldNameAndTypeSb.toString()
                        }
                        // Strip embedded newlines so each record stays one line.
                        val valueString: String = value.toString
                        if(valueString.contains("\n")){
                          valueStr = valueString.replaceAll("\n","")
                        }else{
                          valueStr = valueString
                        }
                        dataSb.append(valueStr).append("&&")
                      }
                    }
                    // Drop the trailing "&&" separator.
                    val childDataStr: String = dataSb.toString().substring(0, dataSb.length - 2)
                    // log.error(s"tableRow :${tableRow} -- ${dataSb.toString().substring(0, dataSb.length - 1)}")
                    childDataList += childDataStr
                    sapTableDataMap.put(childFieldName.toLowerCase, childDataList)
                    sapTableSchemaMap.put(childFieldName.toLowerCase, childSchemaList)
                  }
                }
              } else {
                // Scalar (non-table) child fields are not stored yet; only the
                // stop flag is inspected and logged. Kept in case it is needed later.
                try{
                  if(null != jCoTables.getValue(rowFieldCount)){
                    if(stop_sign.equalsIgnoreCase(jCoTables.getMetaData.getName(rowFieldCount))){
                      if(stop_sign_val.equals(jCoTables.getValue(rowFieldCount))){
                        log.info(s" --- ${stop_sign} 值为 ${jCoTables.getValue(rowFieldCount)} 目前已经到最后一条数据 ---")
                      }else{
                        log.debug(s" --- ${stop_sign} 值为 ${jCoTables.getValue(rowFieldCount)} --- ")
                      }
                    }
                    //数据没有存储
                  }
                }catch{
                  case e: Exception => log.debug(e.getMessage)
                }
              }
            }
          }
        }
      }
    }else{
      // Flat (single-level) table: read rows and columns directly.
      log.error(" 进入组织架构表。。。 ")
      val tableSchemaList: ListBuffer[String] = sapTableSchemaMap.getOrDefault(outputTable.toLowerCase,new ListBuffer[String])
      val tableDataList: ListBuffer[String] = sapTableDataMap.getOrDefault(outputTable.toLowerCase,new ListBuffer[String])
      val rows: Int = jCoTables.getNumRows
      for(row <- 0 until rows){
        jCoTables.setRow(row)
        val fieldCounts: Int = jCoTables.getMetaData.getFieldCount
        val sb = new mutable.StringBuilder
        for(fieldCount <- 0 until fieldCounts){
          val fieldAndTypeSb = new mutable.StringBuilder()
          val fieldName: String = jCoTables.getMetaData.getName(fieldCount).toLowerCase()
          val typeString: String = jCoTables.getMetaData.getTypeAsString(fieldCount)
          fieldAndTypeSb.append(fieldName).append("&").append(typeString)

          // Record each column's "name&type" schema entry only once.
          if(!tableSchemaList.contains(fieldAndTypeSb.toString())){
            tableSchemaList+=fieldAndTypeSb.toString()
          }
          var valueString = ""
          var value: AnyRef = jCoTables.getValue(fieldCount)
          if("" == value || null == value){
            value = "null"
          }
          val valueStr: String = value.toString
          if(valueStr.contains("\n")){
            valueString = valueStr.replaceAll("\n","")
          }else{
            valueString = valueStr
          }

          // NOTE(review): this line overwrites valueString unconditionally,
          // making the if/else branch directly above redundant.
          valueString = value.toString.replaceAll("\n","")
          sb.append(valueString).append("&&")
          //println(s"tableName: o_struc_Info \n -- fieldName: ${fieldName} \n valueStr : ${valueString}")
        }

        // Drop the trailing "&&" separator.
        val tableDataStr: String = sb.toString().substring(0, sb.length - 2)

        tableDataList += tableDataStr

        sapTableDataMap.put(outputTable.toLowerCase,tableDataList)
        sapTableSchemaMap.put(outputTable.toLowerCase,tableSchemaList)
      }
    }

    val set: java.util.Set[Map.Entry[String, ListBuffer[String]]] = sapTableDataMap.entrySet()
    val iterator: java.util.Iterator[Map.Entry[String, ListBuffer[String]]] = set.iterator()

    log.error(" - - - sap数据读取完成.... - - - ")
    // Log row counts per fetched table.
    while(iterator.hasNext){
      val entry: Map.Entry[String, ListBuffer[String]] = iterator.next()
      val tableName: String = entry.getKey
      val dataList: ListBuffer[String] = entry.getValue

      log.error(s" ----  sapTableName: ${tableName}  tableCount: ${dataList.size} ----")
    }

    /*val collection: java.util.Collection[ListBuffer[String]] = sapTableDataMap.values()
    val value: java.util.Iterator[ListBuffer[String]] = collection.iterator()
    while(value.hasNext){
      val strings: ListBuffer[String] = value.next()
      for (elem <- strings) {
        log.error(s"data: ${elem}")
      }
    }*/

    (sapTableDataMap,sapTableSchemaMap)
  }

  /**
   * Detect whether the RFC result table is a two-level (nested) structure,
   * i.e. whether any field of any row is itself a table. All rows and fields
   * are scanned; JCo signals "not a table" by throwing from getTable, which
   * is swallowed at debug level.
   *
   * @param jCoTables result table from the RFC call (may be null)
   * @return true when at least one nested table is found
   */
  def checkIsDouble(jCoTables: JCoTable): Boolean ={
    var nested = false
    if (jCoTables != null) {
      var row = 0
      while (row < jCoTables.getNumRows) {
        jCoTables.setRow(row)
        var col = 0
        while (col < jCoTables.getMetaData.getFieldCount) {
          // getTable throws for scalar fields; treat that as "no nested table here".
          val cell: AnyRef =
            try {
              jCoTables.getTable(jCoTables.getMetaData.getName(col))
            } catch {
              case e: Exception =>
                log.debug(e.getMessage)
                null
            }
          if (cell != null) {
            nested = true
          }
          col += 1
        }
        row += 1
      }
    }
    nested
  }

  /**
   * Build the SAP connection settings from the job config.
   *
   * "isGroup" selects the connection mode: 1 = message-server / logon-group
   * (MSHOST, GROUP, R3NAME), 0 or absent = direct application server
   * (ASHOST, SYSNR). The password is stored encrypted and decrypted here.
   *
   * @param map job configuration map
   * @return SapConn value holding all connection parameters
   */
  def getSapConn(map:java.util.Map[String,Nothing]): SapConn ={

    val targetOrg: String = map.get("isGroup")
    var sysnr: String = ""
    var host: String = ""
    var mshost: String = ""
    var jco_group: String = ""
    var r3name: String = ""
    var target: Int = -1

    // Determine the SAP data-source type and read the matching settings.
    if(null != targetOrg){
      target = targetOrg.toInt
      if(target == 1){
        mshost = map.get("jCO_MSHOST")
        jco_group = map.get("jCO_GROUP")
        r3name = map.get("jCO_R3NAME")
      }else if(target == 0){
        sysnr = map.get("jCO_SYSNR")
        host = map.get("jCO_ASHOST")
      }
    }else{
      sysnr = map.get("jCO_SYSNR")
      host = map.get("jCO_ASHOST")
    }

    val client:String = map.get("jCO_CLIENT")
    val user:String = map.get("jCO_USER")
    val passwdOrg: String = map.get("jCO_PASSWD")
    // Password is stored RSA-encrypted; decrypt before connecting.
    val passwd = JdbcUtil.rsaDecrypt(passwdOrg)
    val lang:String = map.get("jCO_LANG")

    // Note: the decrypted password is deliberately excluded from this log line.
    log.info(s"SapConn: -- ${host}，${sysnr},${client},${user},${lang},${target},${jco_group},${mshost},${r3name} --")
    val conn = SapConn(host,sysnr,client,user,passwd,lang,target,mshost,jco_group,r3name)
    //val conn = SapConn("10.0.0.111","00","620","RFC_DTD","1qaz@wsx","ZH")
    conn
  }

  /**
   * Write the fetched SAP data into the configured Hive tables.
   *
   * Each configured output table is matched against the SAP table names by the
   * warehouse naming rule: the output table name must end with
   * "<rfcFunction>_<sapTableName>". Ranger write permission is verified per
   * matched table before conversion.
   *
   * @param time run time (currently unused here — TODO confirm; conversion uses the broadcast time)
   * @param sparkSession sparkSession
   * @param tuple (sap data map, sap schema map) as produced by initializeDataByName
   * @param broadcast broadcast of (config map, run time, public key)
   */
  def saveSapData2HiveTable(time:String, sparkSession: SparkSession, tuple: (java.util.Map[String, ListBuffer[String]], java.util.Map[String, ListBuffer[String]]),broadcast: Broadcast[(java.util.Map[String,Nothing],String,String)]): Unit ={
    val map: java.util.Map[String, Nothing] = broadcast.value._1

    // Output tables. NOTE(review): unchecked cast — this throws before the
    // isEmpty check below if the config value is missing or not a Seq.
    var outputTableSeq: Seq[String] = map.get("output_table_name").asInstanceOf[Seq[String]]

    var rfcFunc = ""
    try{
      rfcFunc = map.get("rfcFunc").asInstanceOf[String].toUpperCase
    }catch {
      case e: Exception => log.error("获取rfc函数失败")
    }

    // NOTE(review): only logged — execution continues with an empty Seq.
    if(outputTableSeq.isEmpty){
      log.error("hive输出表为空")
    }

    // SAP data rows per table.
    var sapDataMap: java.util.Map[String, ListBuffer[String]] = null
    if(!tuple._1.isEmpty){
      sapDataMap = tuple._1
    }else{
      log.error("获取SapDataMap为空 ...")
    }
    // SAP table names and their schemas.
    var sapSchemaMap: java.util.Map[String, ListBuffer[String]] = null
    if(!tuple._2.isEmpty){
      sapSchemaMap = tuple._2
    }else{
      log.error("获取SapSchemaMap为空 ...")
    }
    // Ranger permission per output table name.
    val isOwner = checkUserPolicy(map,rangerUrl)
    // Conversion errors are collected so remaining tables are still attempted.
    val exceptions: ListBuffer[Exception] = new ListBuffer[Exception]()

    // Iterate over the fetched SAP tables and match each against the output tables.
    val set: java.util.Set[Map.Entry[String, ListBuffer[String]]] = sapDataMap.entrySet()
    val iterator: java.util.Iterator[Map.Entry[String, ListBuffer[String]]] = set.iterator()
    // Tracks whether at least one SAP table matched an output table.
    var flag = false
    while (iterator.hasNext){
      val entry: Map.Entry[String, ListBuffer[String]] = iterator.next()
      val sapTableName: String = entry.getKey
      val sapDataList: ListBuffer[String] = entry.getValue
      log.error(s"---  遍历output_table_name中... sapDataList.size :${sapDataList.size}  ---")
      for (output_hive_table <- outputTableSeq) {
        val targetOutputTableName: String = rfcFunc+ "_" + sapTableName.toLowerCase
        // Match warehouse table to SAP table by naming rule:
        // <warehouse prefix>_<function name>_<sap table name>
        if(output_hive_table.toLowerCase.endsWith(targetOutputTableName.toLowerCase) && sapDataList.size > 0){
          flag = true
          val sapSchemaList: ListBuffer[String] = sapSchemaMap.get(sapTableName)
          log.error(s" --- 输出表名： ${output_hive_table} ， sap数据源表名：${sapTableName} ,sapSchemaSize：${sapSchemaList.size} ---")
          log.error(s"---  进行匹配：${output_hive_table.toLowerCase} .endWith( ${targetOutputTableName.toLowerCase} ) 结果：${output_hive_table.endsWith(sapTableName.toLowerCase)} --- ")
          // Abort immediately when the Ranger user lacks write permission.
          val option: Option[Boolean] = isOwner.get(output_hive_table)
          if(option.isEmpty || !option.get){
            throw new RuntimeException(s"- - - You do not have permission to write to the table ${output_hive_table}")
          }

          if(sapSchemaList.nonEmpty){
            try{
              // Convert the fetched rows to DataFrames and write them to Hive.
              sapdata2dfs(sparkSession,sapDataList,sapSchemaList,output_hive_table,broadcast)
            }catch {
              case e: Exception => log.error(s" - - - 输出表：${output_hive_table} 输出报错, 报错信息: ${e.getMessage} - - - ")
                // Single-column tables fail hard; otherwise collect and continue.
                if(sapSchemaList.size <= 1){
                  throw e
                }else{
                  exceptions += e
                }
            }
          }else{
            log.error("sapSchemaList.size == 0")
          }
        }else{
          log.error(s"---  进行匹配：${output_hive_table} .endWith( ${targetOutputTableName} ) 结果：${output_hive_table.endsWith(targetOutputTableName.toLowerCase)} --- ")
        }
      }
    }
    if(false == flag){
      throw new RuntimeException("未找到hive匹配sap输出命名规则的表名")
    }

    // NOTE(review): only the first accumulated exception is actually thrown;
    // the rest are silently discarded.
    if(exceptions.size > 0){
      for (elem <- exceptions) {
        throw elem
      }
    }
  }

  /**
   * Look up, from the platform metadata database, which fields of the given
   * tables are configured with field-level encryption.
   *
   * @param spark sparkSession
   * @param map job configuration map ("allTableIds" = comma-separated table ids, defaults to "1")
   * @return mutable map of field name -> "tableName&permissionName".
   *         NOTE(review): keyed by field name only — a field name occurring in
   *         two tables overwrites the earlier entry; confirm this is intended.
   */
  def checkTableEncryption(spark: SparkSession, map:Map[String,Nothing]): mutable.Map[String, String] ={

    val columnMap: mutable.Map[String, String] = mutable.Map[String,String]()

    // JDBC settings for the metadata database, from common.properties.
    val driver: String = PropertiesUtils.getValue(this.properties,"driver")
    val userName: String = PropertiesUtils.getValue(this.properties,"username")
    val passwd: String = PropertiesUtils.getValue(this.properties,"password")
    val url: String = PropertiesUtils.getValue(this.properties,"mysqlUrl")

    val properties = new Properties()
    properties.put("driver", driver)
    properties.put("user", userName)
    properties.put("url", url)
    properties.put("password", passwd)

    // Table ids to query; default to "1" when absent.
    val allTableIds: String = try {
      map.get("allTableIds").toString
    }catch {
      case exception: Exception => "1"
    }

    // NOTE(review): allTableIds is interpolated straight into SQL — safe only
    // while the config is trusted; a parameterized query would be preferable.
    val tableSql = "SELECT " +
      "a.table_id AS tableId, " +
      "c.db_name AS dbName, " +
      "c.table_name AS tableName, " +
      "a.field_name, " +
      "a.field_alias AS fieldAlias, " +
      "a.field_type AS fieldType, " +
      "IFNULL(b.permiss_name,'不加密') AS permissName " +
      "  FROM " +
      "bdp_table_field AS a " +
      "LEFT JOIN bdp_table_info AS c ON c.id = a.table_id " +
      "LEFT JOIN bdp_data_permission_param AS b ON a.ency_style_id = b.id " +
      s"WHERE c.id IN (${allTableIds}) " +
      "AND b.permiss_name != '不加密' "

    // Run the query through Spark's JDBC reader as an inline subquery.
    val df = spark.read.jdbc(url, s"(${tableSql}) as m",properties)
    val empty: Boolean = df.isEmpty

    if(!empty){
      val rows: Array[Row] = df.select("tableName","field_name","permissName").collect()
      for (row <- rows) {
        val tableName: String = row.get(0).toString
        val columnStr: String = row.get(1).toString
        val columnPer: String = row.get(2).toString
        columnMap.put(columnStr,tableName+"&"+columnPer)
      }
    }
    columnMap
  }

  /**
   * Build the ordered, backtick-quoted column list ("t.`a`,t.`b`,...") of a
   * Hive output table.
   *
   * `desc` repeats partition columns after a "# Partition Information" header;
   * the add/remove toggle below drops any column that appears twice, so
   * partition columns are excluded from the result.
   *
   * @param sparkSession        active SparkSession
   * @param hiveOutputTableName Hive output table name
   * @param hiveOutputDatabase  Hive output database name
   * @param partitionStr        partition spec (currently unused here — TODO confirm it can be dropped)
   * @return comma-joined column list in table order, each entry "t.`col`";
   *         empty string when no usable columns are found
   */
  def getHiveTableFieldNames(sparkSession: SparkSession, hiveOutputTableName: String, hiveOutputDatabase: String,partitionStr: String): String = {
    val tableColumns = new ListBuffer[String]()

    // Describe the output table to obtain its columns in declaration order.
    sparkSession.sql(s"use ${hiveOutputDatabase}")
    val frame: DataFrame = sparkSession.sql(s"desc ${hiveOutputTableName}")
    frame.show()
    val rows: Array[Row] = frame.select("col_name").collect()

    for (row <- rows) {
      val colName: String = row.get(0).toString
      // Skip blank lines and "# ..." section headers emitted by `desc`.
      if (!("".equals(colName) || colName.startsWith("#"))) {
        // Toggle: a second occurrence (partition column) removes the first.
        if (!tableColumns.contains(colName)) {
          tableColumns += colName
        } else {
          tableColumns -= colName
        }
      }
    }

    // Fixed: the previous deleteCharAt(length-1) threw
    // StringIndexOutOfBoundsException when no usable column was found;
    // mkString yields "" for an empty list instead.
    val fieldStr: String = tableColumns.map(col => s"t.`${col}`").mkString(",")
    log.error(s" --- 拼接后的输出表字段顺序： ${fieldStr} --- ")
    fieldStr
  }

  /**
   * Converts collected SAP rows into DataFrames in slices of `batch_size`
   * and writes every slice into the target Hive table.
   *
   * @param sparkSession        active SparkSession
   * @param sapData             SAP rows, one "&&"-joined string per row; this buffer is drained in place
   * @param sapSchema           column descriptors in "name&type" form (every column becomes StringType)
   * @param hiveOutputTableName Hive output table name
   * @param broadcast           broadcast of (output config map, batch time, RSA key)
   */
  @throws(classOf[RuntimeException])
  def sapdata2dfs(sparkSession: SparkSession,sapData:ListBuffer[String],sapSchema:ListBuffer[String],hiveOutputTableName:String,broadcast: Broadcast[(java.util.Map[String,Nothing],String,String)]): Unit ={
    log.error(s" --- ${hiveOutputTableName} 数据转换DF --- ")
    val map: java.util.Map[String, Nothing] = broadcast.value._1
    val time = broadcast.value._2
    val publicKey = broadcast.value._3

    // Slice size for batched conversion; defaults to 20000 when missing or unparsable.
    val batch_size: Int = try {
      map.get("batch_size").toString.toInt
    } catch {
      case e: Exception => 20000
    }
    var output_table_name: Seq[String] = Seq()
    try {
      output_table_name = map.get("output_table_name")
    } catch {
      case e: Exception => log.error(s"获取输出表失败 ${e.getMessage}")
    }
    val output_db_name: String = map.get("output_db_name")
    var output_data_partition: String = ""
    var hight_file_num: String = ""

    // Number of output files per batch; defaults to "1" when not configured.
    try {
      hight_file_num = map.get("hight_file_num")
    } catch {
      case e: Exception => hight_file_num = "1"
        log.error(" --- 未设置表输出文件数，默认输出文件个数为 1 --- ")
    }

    // Partition spec ("partitionKey=value"), with date placeholders resolved against `time`.
    try {
      output_data_partition = ParseDateTime.replaceDateTime(map.get("output_data_partition"), time)
    } catch {
      case e: Exception =>
        log.error(" --- 未设置表输出分区字段及字段值 --- ")
    }

    log.info(s"-- insert_into_table_info : ${output_db_name} --")
    log.info(s"-- insert_into_table_info : ${output_table_name} --")
    log.info(s"-- insert_into_table_info : ${output_data_partition} --")

    val fields = new ListBuffer[StructField]()
    breakable {
      // Every SAP column is ingested as a plain string; the type part of "name&type" is ignored.
      for (elem <- sapSchema) {
        val nameAndType: Array[String] = elem.split("&")
        fields += StructField(nameAndType(0), StringType)
      }

      // Projection list matching the Hive table's column order.
      var fieldStr: String = getHiveTableFieldNames(sparkSession, hiveOutputTableName, output_db_name, output_data_partition)
      val structType = new StructType(fields.toArray)
      val frames = new mutable.ListBuffer[DataFrame]

      // Turns raw "&&"-delimited lines into a DataFrame coalesced to the
      // configured number of output files.
      // BUGFIX: the original called frame.repartition(...) and discarded the
      // result (DataFrames are immutable), so the file-count setting never
      // took effect; the repartitioned frame is now the one collected.
      // BUGFIX: the original filter also tested `"" != str` against an
      // Array[String], which is always true; only the null guard mattered.
      def toFrame(lines: ListBuffer[String]): DataFrame = {
        val strRDD: RDD[String] = sparkSession.sparkContext.parallelize(lines)
        val arrayRdd: RDD[Array[String]] = strRDD.map(_.split("&&"))
        val rowRDD: RDD[Row] = arrayRdd.filter(null != _).map(Row.fromSeq(_))
        sparkSession.createDataFrame(rowRDD, structType).repartition(hight_file_num.toInt)
      }

      // Drain sapData in batch_size slices.
      while (0 < sapData.size) {
        if (sapData.size >= batch_size) {
          val batchSapData: ListBuffer[String] = sapData.slice(0, batch_size)
          // NOTE(review): breaking here abandons any frames accumulated so far
          // without writing them — kept as-is to preserve existing behavior.
          if (batchSapData.isEmpty) break
          val frame: DataFrame = toFrame(batchSapData)
          sapData --= batchSapData
          frames += frame
        } else {
          val frame: DataFrame = toFrame(sapData)
          frame.printSchema()
          frame.show(3)
          sapData.clear()
          frames += frame
        }
      }

      // Leftover guard: unreachable in practice (the loop only exits once
      // sapData is empty), kept defensively.
      if (sapData.nonEmpty) {
        frames += toFrame(sapData)
      }

      // Wrap RSA-encrypted columns of this table in stringEncrypt(...) calls.
      val columnsMap: mutable.Map[String, String] = checkTableEncryption(sparkSession, map)
      if (columnsMap.nonEmpty) {
        val column_type_iter: Iterator[(String, String)] = columnsMap.iterator
        while (column_type_iter.hasNext) {
          val tuple: (String, String) = column_type_iter.next()
          val columnStr: String = tuple._1.toLowerCase()
          val tableAndType: String = tuple._2

          // tableAndType is "tableName&encryptionType".
          val strings: Array[String] = tableAndType.split("&")
          val tableName = strings(0).toLowerCase()
          val columnType = strings(1)
          log.info(" columnStr: %s , columnType: %s ".format(columnStr, tableAndType))
          log.info(s" RSA.equals(columnType): ${"RSA".equals(columnType)}")
          log.info(s" fieldStr.contains(columnStr): ${fieldStr.contains(columnStr)}")
          log.debug(s" hiveOutputTableName.equals(tableName): ${hiveOutputTableName.equals(tableName)}")

          if ("RSA".equals(columnType) && fieldStr.contains(columnStr) && hiveOutputTableName.equals(tableName)) {
            val fieldSb = new mutable.StringBuilder("t.`").append(columnStr).append("`").toString()
            log.debug("- - - fieldSb: %s".format(fieldSb))
            val newField = new mutable.StringBuilder(" stringEncrypt( t.`").append(columnStr).append("` ) as ").append(columnStr).toString()
            log.debug("- - - newField: %s".format(newField))
            // BUGFIX: replaceAll interprets its arguments as a regex and a
            // regex replacement; quote both so column names containing
            // metacharacters (e.g. '$') are substituted literally.
            fieldStr = fieldStr.replaceAll(
              java.util.regex.Pattern.quote(fieldSb),
              java.util.regex.Matcher.quoteReplacement(newField))
            log.debug(" - - - fieldStr : %s".format(fieldStr))
          }
        }
      }

      // Write each accumulated batch into Hive; the batch index controls the
      // write mode (only batch 0 may overwrite).
      for (frameSize <- frames.indices) {
        sapdataframe2hive(sparkSession, frames(frameSize), hiveOutputTableName, fieldStr, broadcast, frameSize)
      }
    }
  }

  /**
   * RSA-encrypts `data` and returns the Base64-encoded ciphertext.
   * Input is processed in 117-byte chunks, the maximum PKCS#1 payload for a
   * 1024-bit RSA key, so arbitrarily long plaintext is supported.
   *
   * NOTE(review): despite the original "公钥加密" (public-key encryption)
   * wording, the key is decoded as a PKCS#8 PRIVATE key and the cipher is
   * initialised with it; callers decrypt with the matching public key.
   *
   * @param key  Base64-encoded PKCS#8 RSA private key
   * @param data plaintext to encrypt; null or empty yields ""
   * @return Base64-encoded ciphertext, or "" for null/empty input
   */
  def encrypt(key: String, data: String): String ={
    var encodeToString = ""
    if(!"".equals(data) && null != data){
      val decode = Base64.getDecoder().decode(key)
      val pkcs8EncodedKeySpec: PKCS8EncodedKeySpec = new PKCS8EncodedKeySpec(decode)
      val kf = KeyFactory.getInstance("RSA")
      val generatePrivate = kf.generatePrivate(pkcs8EncodedKeySpec)
      val ci = Cipher.getInstance("RSA")
      ci.init(Cipher.ENCRYPT_MODE, generatePrivate)

      // BUGFIX: encode with an explicit charset instead of the platform
      // default, so ciphertext is stable across JVM configurations.
      val bytes = data.getBytes(java.nio.charset.StandardCharsets.UTF_8)
      val inputLen = bytes.length
      val bops = new ByteArrayOutputStream
      var offLen = 0 // current offset into the plaintext
      while (offLen < inputLen) {
        // Encrypt at most 117 bytes per doFinal (PKCS#1 limit for 1024-bit keys).
        val chunk = math.min(117, inputLen - offLen)
        bops.write(ci.doFinal(bytes, offLen, chunk))
        offLen += chunk
      }
      bops.close()
      val encryptedData = bops.toByteArray
      encodeToString = Base64.getEncoder.encodeToString(encryptedData)
    }

    encodeToString
  }

  /**
   * Writes one DataFrame batch into the Hive output table via a global temp
   * view and an INSERT INTO / INSERT OVERWRITE ... SELECT statement.
   *
   * @param sparkSession        active SparkSession
   * @param frame               batch of SAP data to persist
   * @param hiveOutputTableName Hive output table name
   * @param fieldStr            projection list (columns may be wrapped in stringEncrypt(...))
   * @param broadcast           broadcast of (output config map, batch time, RSA key)
   * @param targetInt           batch index; only batch 0 honours the configured
   *                            write mode — later batches always append so an
   *                            "overwrite" run does not erase earlier batches
   */
  def sapdataframe2hive(sparkSession: SparkSession, frame: DataFrame,hiveOutputTableName: String,fieldStr:String,broadcast: Broadcast[(java.util.Map[String,Nothing],String,String)],targetInt:Int) = {
    log.error(s" --- ${hiveOutputTableName}写入hive开始 --- ")
    val map: java.util.Map[String, Nothing] = broadcast.value._1
    val time: String = broadcast.value._2

    // Only the first batch uses the configured mode; the rest must append.
    val output_write_model: String = if (targetInt < 1) {
      map.get("output_write_model")
    } else {
      "append"
    }

    var output_table_name: Seq[String] = Seq()
    try {
      output_table_name = map.get("output_table_name")
    } catch {
      case e: Exception => log.error("获取输出表失败")
    }
    val output_db_name: String = map.get("output_db_name")
    var output_data_partition: String = ""
    var hight_file_num: String = ""

    // Number of output files; defaults to "1" when not configured (unused here,
    // kept for parity with the conversion step's config handling).
    try {
      hight_file_num = map.get("hight_file_num")
    } catch {
      case e: Exception => hight_file_num = "1"
        log.error(" --- 未设置表输出文件数，默认输出文件个数为 1 --- ")
    }

    // Partition spec ("partitionKey=value"), with date placeholders resolved.
    try {
      output_data_partition = ParseDateTime.replaceDateTime(map.get("output_data_partition"), time)
    } catch {
      case e: Exception =>
        log.error(" --- 未设置表输出分区字段及字段值 --- ")
    }

    log.info(s"-- insert_into_db_name : ${output_db_name} --")
    log.info(s"-- insert_into_table_name : ${output_table_name} --")
    log.info(s"-- insert_into_table_partition : ${output_data_partition} --")

    val tmpTableName = hiveOutputTableName + "_tmp"

    // Register the RSA-encryption UDF referenced by stringEncrypt(...) in fieldStr.
    sparkSession.udf.register("stringEncrypt", (content: AnyRef) => {
      val publicKey = broadcast.value._3
      if (null != content) encrypt(publicKey, content.toString) else ""
    })

    frame.printSchema()

    // Expose the batch as a global temp view and log its row count.
    frame.createOrReplaceGlobalTempView(tmpTableName)
    val tmpFrame: DataFrame = sparkSession.sql("select count(*) as dataframe_count from global_temp.`%s`".format(tmpTableName))
    tmpFrame.show()

    sparkSession.sql(s"use ${output_db_name}")

    // Build the INSERT statement.
    // CONSISTENCY FIX: the temp view name is now backquoted in the FROM clause
    // (matching the count query above), and the db qualifier is present in all
    // branches (the original append-without-partition branch omitted it; the
    // preceding `use` statement made that work only incidentally).
    val insertVerb =
      if ("overwrite".equals(output_write_model)) "insert overwrite table"
      else "insert into table"
    val hasPartition =
      output_data_partition != null && !"".equals(output_data_partition) && !"null".equals(output_data_partition)
    val partitionClause = if (hasPartition) s" partition(${output_data_partition})" else ""
    val intoHiveSql: String =
      s"${insertVerb} `${output_db_name}`.`${hiveOutputTableName.toLowerCase}`${partitionClause}" +
        s" select ${fieldStr} from `global_temp`.`${tmpTableName}` as t"

    sparkSession.sql(s"use ${output_db_name}")
    // Allow dynamic partition values in the partition clause.
    sparkSession.sql("set hive.exec.dynamic.partition.mode=nonstrict")
    log.error(s"-- insertIntoTableSql: ${intoHiveSql} --")
    try {
      sparkSession.sql(intoHiveSql)
    } catch {
      case e: Exception =>
        val exception: RuntimeException = new RuntimeException(s" - - - 写入hive表：${hiveOutputTableName} 异常，报错信息: ${e.getMessage} - - - ")
        throw exception
    }
    log.error(s" -- ${hiveOutputTableName} 写入数据完成...")
  }
}