package cn.getech.data.development.job

import java.io.{BufferedReader, ByteArrayOutputStream, InputStream, InputStreamReader}
import java.security.KeyFactory
import java.security.spec.PKCS8EncodedKeySpec
import java.util.{Base64, Properties}

import cn.getech.data.development.job.DBData2Hive.properties
import cn.getech.data.development.job.util.{FtpUtil, JdbcUtil, ParseDateTime, PropertiesUtils, RangerRestUtil}
import com.alibaba.fastjson.{JSON, JSONArray}
import javax.crypto.Cipher
import org.apache.commons.net.ftp.{FTP, FTPClient}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql._
import org.slf4j.LoggerFactory
import sun.net.ftp.FtpClient

import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.util.control.Breaks.{break, breakable}

object FtpData2Hive {
  private val log = LoggerFactory.getLogger(this.getClass)
  private val properties: Properties = PropertiesUtils.getProperties("common.properties")
  private val publicKey = PropertiesUtils.getValue(properties,"inputPublicKey")
  private var batchSize = 50000
  private val rangerUrl = PropertiesUtils.getValue(properties,"rangerUrl")
  private val rangerUtil: RangerRestUtil = new RangerRestUtil(rangerUrl)

  /**
   * Entry point: loads the job config JSON from HDFS, verifies the Ranger
   * write permission, streams the FTP file into Hive and finally renames the
   * consumed remote file with a ".ok" suffix.
   *
   * args(0) = job id (used to build the config file path)
   * args(1) = raw time string used to resolve dynamic paths/partitions
   * args(2) = optional batch size override
   */
  def main(args: Array[String]): Unit = {

    //参数校验 — the job needs at least the job id and the time string
    if(args.length < 2){
      // message previously claimed one parameter although two are required
      println("At least need two parameters <jobId> <time>")
      System.exit(2)
    }

    //json配置文件路径
    val jobId = args(0)
    //分区未格式化时间
    val time = args(1)

    // optional third argument overrides the default batch size
    if(args.length == 3){
      batchSize = args(2).toInt
    }

    val jsonPath  = "hdfs:///bdp/jobconfig/1/jobid_" + jobId + ".json"

    val spark: SparkSession = getSparkSession()

    //读取json获取map
    val map: Map[String, AnyRef] = readJsonConf(spark,jsonPath)

    //检测map中的必填条件
    if(map.isEmpty){
      log.error("failure to get ftp config json file... ")
      // the original called break() outside any breakable{} block, which
      // escapes as an uncaught control-flow exception; abort cleanly instead
      spark.stop()
      System.exit(1)
    }

    val isOwner = checkUserPolicy(map,rangerUrl)

    if(isOwner){
      val broadcast: Broadcast[(Map[String, AnyRef], String,String)] = spark.sparkContext.broadcast((map,time,publicKey))

      clientReadFtpData(spark,broadcast)
    }else{
      // map.get(...).toString would have printed "Some(x)" — unwrap the value
      throw new RuntimeException (s"You do not have permission to write to the table ${map.getOrElse("output_table_name","").toString}")
    }

    spark.stop()

    renameReadedFile(map,time)
  }

  /**
   * 检验用户权限 — checks whether the configured ranger user may write to the
   * target Hive table.
   *
   * @param map 配置文件 (job config map)
   * @param rangerUrl rangerUrl (unused here — the object-level rangerUtil is
   *                  already bound to it; kept for interface compatibility)
   * @return 是否有权限，true/false
   */
  def checkUserPolicy(map: Map[String, AnyRef], rangerUrl: String): Boolean = {

    // The original condition was inverted: it read the config value only when
    // the key was ABSENT (producing the literal string "None") and otherwise
    // fell back to "hdfs". Use the configured user when present.
    val ranger_user: String = map.get("rangerUserName") match {
      case Some(u) if null != u && u.toString.nonEmpty => u.toString
      case _ => "hdfs"
    }

    // Option.toString would render "Some(x)" — unwrap the values instead.
    val output_table_name: String = map.getOrElse("output_table_name","").toString
    val output_db_name: String = map.getOrElse("output_db_name","").toString

    rangerUtil.queryPermissionByUserAndTable(ranger_user,output_db_name,output_table_name)
  }

  /**
   * 将读取过的文件重命名 — appends ".ok" to the consumed remote file so the
   * next run will not pick it up again.
   * @param map ftp相关配置
   * @param time 动态文件路径的时间
   */
  def renameReadedFile(map: Map[String, AnyRef],time:String): Unit ={
    val user: String = map.getOrElse("ftp_user","").toString
    val passwd: String = map.getOrElse("ftp_passwd","").toString
    val realPasswd = JdbcUtil.rsaDecrypt(passwd)
    val host: String = map.getOrElse("ftp_host","").toString
    val filepath: String = ParseDateTime.replaceDateTime(map.getOrElse("ftp_file_path","").toString,time)
    val port: String = map.getOrElse("ftp_port","").toString
    val strings: Array[String] = filepath.split("/")
    val fileName = strings(strings.length-1)

    log.info(s"fileuser:${user}")
    log.info(s"filehost:${host}")
    log.info(s"fileport:${port}")
    log.info(s"filePath:${filepath}")
    log.info(s"fileName:${fileName}")

    val client = new FTPClient()
    client.connect(host,port.toInt)

    try {
      if(client.login(user,realPasswd)) {
        log.info("ftp successful login...")
        client.enterLocalPassiveMode()
        // literal replace — replaceAll treated the file name as a regex,
        // which breaks for names containing metacharacters like '.'
        val path: String = filepath.replace(s"/${fileName}", "")
        log.info(filepath)
        client.setRemoteVerificationEnabled(false)
        client.changeWorkingDirectory(path)
        client.setDataTimeout(1200000)
        client.setBufferSize(10240)
        client.setControlEncoding("UTF-8")
        client.setFileType(FTP.BINARY_FILE_TYPE)

        // mark the file as processed
        val result: Boolean = client.rename(fileName, fileName + ".ok")

        if(result){
          log.info(s"${"  rename success ..."}")
        }else{
          log.error(s"${"  rename error ..."}")
        }
      }else{
        log.error("ftpclient 链接失败···")
      }
    } finally {
      // BUG FIX: the original leaked the control connection — always release it
      try client.logout() catch { case _: Exception => () }
      if (client.isConnected) client.disconnect()
    }
  }

  /**
   *  ftpData转换成dataframe — turns the buffered comma-separated lines into a
   *  DataFrame, using the header string as an all-StringType schema.
   * @param spark sparksession
   * @param schemaStr 数据结构字符串
   * @param ftpDataArr ftp数据数组
   * @return dataframe
   */
  def ftpData2DataFrame(spark: SparkSession, schemaStr: String, ftpDataArr: ArrayBuffer[String]): DataFrame = {
    log.info("Start converting FtpData to DataFrame work... ")
    // drop empty/null lines, then split each remaining line into a Row
    val rowRDD: RDD[Row] = spark.sparkContext
      .parallelize(ftpDataArr)
      .filter(line => !"".equals(line) && null != line)
      .map(line => Row.fromSeq(line.split(",").toSeq))
    //将表头转化成schema
    val schema: StructType = ftpFields2Schema(schemaStr)
    val result: DataFrame = spark.createDataFrame(rowRDD, schema)
    log.info("End converting FtpData to DataFrame work... ")
    result
  }

  /**
   * ftpClient方法读取ftp数据，形成20000一个批次的dataframe
   *
   * Streams the remote file line by line and writes it to Hive in batches.
   * Three layouts are supported:
   *  - "*.json" without no_standard_format: one json document per line,
   *    flushed every `dataSize` lines
   *  - "*.json" with no_standard_format: a single line holding a json array
   *    of json strings, flushed every `batchSize` entries
   *  - any other extension: csv-like data with the header on the first line
   *
   * @param spark the active SparkSession
   * @param broadcast (config map, raw time string, rsa public key)
   */
  def clientReadFtpData(spark: SparkSession, broadcast: Broadcast[(Map[String,AnyRef],String,String)]): Unit = {
    log.info("Start getting ftp data work...")
    val map = broadcast.value._1
    val time = broadcast.value._2

    import spark.implicits._
    //相关配置信息
    val user: String = map.getOrElse("ftp_user","").toString
    val passwd: String = map.getOrElse("ftp_passwd","").toString
    val realPasswd = JdbcUtil.rsaDecrypt(passwd)
    val host: String = map.getOrElse("ftp_host","").toString
    val filepath: String = ParseDateTime.replaceDateTime(map.getOrElse("ftp_file_path","").toString,time)
    val port: String = map.getOrElse("ftp_port","").toString
    val dataSize: Int = map.getOrElse("batch_size","20000").toString.toInt
    // mere presence of the key selects the "non standard" json-array layout
    val formatTarget: Boolean = map.get("no_standard_format").isDefined

    //获取ftp客户端
    val client = getFtpClient(host,port,user,realPasswd)
    log.info(s" --- ftpFilPath: %s --- ".format(filepath))
    log.info(s" --- ftpuser: %s --- ".format(user))
    log.info(s" --- ftphost: %s --- ".format(host))
    log.info(s" --- ftpport: %s --- ".format(port))

    //获取流
    val is: InputStream = client.getFileStream(filepath)
    val reader = new BufferedReader(new InputStreamReader(is))

    //数据存储
    val ftpDataArr = new ArrayBuffer[String]
    //计数标志
    var ftpCount = 0
    var batchCount = 0
    var ftpData = ""
    var schemaStr = ""

    try {
      if(filepath.endsWith(".json") && !formatTarget){

        while(null != ftpData) {
          if (0 <= ftpCount && ftpCount <= dataSize) {
            ftpData = reader.readLine()
            if(null != ftpData && !"".equals(ftpData)){
              ftpDataArr += ftpData
              ftpCount += 1
            }
          } else {
            //batch is full — flush it to hive
            if (ftpDataArr.nonEmpty) {
              val ds: Dataset[String] = spark.createDataset(ftpDataArr)
              // BUG FIX: na.drop() returns a NEW DataFrame; the original
              // discarded the result, so null rows were never dropped
              val frame: DataFrame = spark.read.json(ds).na.drop()
              df2Hive(spark,frame,broadcast,batchCount)
              ftpDataArr.clear()
              log.error(" --- 获取ftp数据条数: %d --- ".format(ftpCount))
              batchCount += 1
              ftpCount = 0
            }
          }
        }

        //flush the remaining (< dataSize) lines after EOF
        if(ftpDataArr.nonEmpty){
          val strings = ftpDataArr.take(3)
          for (string <- strings) {
            log.error(s" - - - ftpData: ${string}")
          }
          log.error(s" --- 获取ftp数据条数: ${ftpDataArr.size} --- ")
          val strRDD: RDD[String] = spark.sparkContext.parallelize(ftpDataArr.toSeq)
          val frame: DataFrame = spark.read.json(strRDD)
          log.info(" - - - ftp数据转dataframe Schema: ")
          frame.printSchema()
          df2Hive(spark,frame,broadcast,batchCount)
          batchCount += 1
          ftpDataArr.clear()
        }
      }else if(filepath.endsWith(".json") && formatTarget){
        ftpData = reader.readLine()
        if(null != ftpData && !"".equals(ftpData)){
          val jsonArray: JSONArray = JSON.parseArray(ftpData)
          val dataStrs = jsonArray.toJavaList(classOf[String]).asScala
          log.info(s" --- 获取ftp数据条数: ${dataStrs.size} --- ")
          //循环取数
          while(0 < dataStrs.size){
            //数据条数大于 batchsize
            if(dataStrs.size > batchSize){
              val batch: mutable.Buffer[String] = dataStrs.slice(0, batchSize)
              // BUG FIX: the original parallelized the whole remaining buffer
              // (dataStrs) instead of the sliced batch, so every iteration
              // re-wrote all remaining rows and produced duplicates
              val jsonRDD: RDD[String] = spark.sparkContext.parallelize(batch)
              // BUG FIX: keep the na.drop() result instead of discarding it
              val frame: DataFrame = spark.read.json(jsonRDD).na.drop()
              frame.printSchema()
              df2Hive(spark,frame,broadcast,batchCount)
              dataStrs --= batch
              batchCount += 1
            }else{
              //数据条数小于 batchsize — last (partial) batch
              val jsonRDD: RDD[String] = spark.sparkContext.parallelize(dataStrs.seq)
              val frame: DataFrame = spark.read.json(jsonRDD).na.drop()
              df2Hive(spark,frame,broadcast,batchCount)
              dataStrs.clear()
            }
          }
        }
      }else if(!filepath.endsWith(".json") && !formatTarget){
        while(null != ftpData){
          //默认第一行是表头 按，切分获取表字段
          if(ftpCount == 0){
            schemaStr = reader.readLine()
            ftpCount += 1
          }else if(0 < ftpCount && ftpCount <= dataSize){
            ftpData = reader.readLine()
            // NOTE: at EOF a null ftpData can be appended here; it is filtered
            // out again inside ftpData2DataFrame
            if(!"".equals(ftpData)){
              ftpDataArr += ftpData
              ftpCount += 1
            }
          }else{
            //batch is full — flush it to hive
            if(!"".equals(schemaStr) && ftpDataArr.nonEmpty){
              val frame: DataFrame = ftpData2DataFrame(spark,schemaStr,ftpDataArr)
              df2Hive(spark,frame,broadcast,batchCount)
              ftpDataArr.clear()
              log.info(s" --- 获取ftp数据条数: %d --- ".format(ftpCount))
              batchCount += 1
              ftpCount = 0
            }
          }
        }

        //循环后输出 — flush the remaining lines after EOF
        if(!"".equals(schemaStr) && ftpDataArr.nonEmpty){
          val frame: DataFrame = ftpData2DataFrame(spark,schemaStr,ftpDataArr)
          frame.printSchema()
          df2Hive(spark,frame,broadcast,batchCount)
          ftpDataArr.clear()
          batchCount += 1
          log.info(s" --- 获取ftp数据条数: %d --- ".format(ftpCount))
        }
      }
    } finally {
      // BUG FIX: the original never released the data stream; closing the
      // reader also closes the underlying InputStream
      reader.close()
    }

    log.info("End the work of getting ftp data...")
  }

  /**
   * ftp获取表头转化schema的方法 — builds an all-StringType schema from a
   * comma-separated header line.
   * @param schemaStr 数据结构字符串
   * @return StructType
   */
  def ftpFields2Schema(schemaStr: String): StructType = {
    // every csv column is read as a string; typing happens downstream if at all
    val structFields: Array[StructField] =
      schemaStr.split(",").map(name => StructField(name, StringType))
    new StructType(structFields)
  }

  /**
   * 读取ftp连接配置及写入hive相关信息 — loads the first row of the json config
   * file and exposes it as a field-name -> value map.
   * @param spark SparkSession
   * @param path jsonPath
   * @return map (empty only if getValuesMap yields no fields)
   */
  def readJsonConf(spark:SparkSession, path:String): Map[String,AnyRef] = {
    log.info("--- 配置文件路径：%s ---".format(path))
    val firstRow: Row = spark.read.json(path).first()
    firstRow.getValuesMap[AnyRef](firstRow.schema.fieldNames)
  }

  /**
   * DataFrame写入hive分区表
   *
   * Writes one batch into the configured Hive table/partition via a generated
   * INSERT statement; columns flagged "RSA" by checkTableEncryption are wrapped
   * with the stringEncrypt UDF. The first batch honours the configured write
   * mode, subsequent batches always append.
   *
   * @param spark the active SparkSession
   * @param dataFrame the batch to persist
   * @param broadcast (config map, raw time string, rsa public key)
   * @param batchCount zero-based index of the current batch
   */
  def df2Hive(spark:SparkSession, dataFrame: DataFrame, broadcast: Broadcast[(Map[String, AnyRef], String,String)],batchCount: Int): Unit = {
    val map: Map[String, AnyRef] = broadcast.value._1
    val time: String = broadcast.value._2
    // only the first batch may overwrite; later batches must append
    val output_write_model: String = if(batchCount == 0) map.getOrElse("output_write_model", "").toString else "append"

    val output_table_name: String = map.getOrElse("output_table_name","").toString
    val output_db_name: String = map.getOrElse("output_db_name","").toString
    val output_data_partition: String = ParseDateTime.replaceDateTime(map.getOrElse("output_data_partition","").toString,time)
    val hight_file_num: String = map.getOrElse("hight_file_num","1").toString
    log.error(s" --- Start the writing of the hive table ${output_table_name} ---")

    dataFrame.printSchema()

    dataFrame.coalesce(hight_file_num.toInt).createOrReplaceGlobalTempView("temptable")

    log.info(s"mode: ${output_write_model}")
    log.info(s"column: ${output_data_partition}")
    log.info(s"database: ${output_db_name}")
    log.info(s"tableName: ${output_table_name}")

    var fieldStr: String = getHiveTableScheme(spark,output_db_name,output_table_name,output_data_partition)

    val columnMap: mutable.Map[String, String] = checkTableEncryption(spark,map)

    //如果是RSA加密方式，则把字段替换成加密表达式
    for ((rawColumn, columnType) <- columnMap) {
      val columnStr: String = rawColumn.toLowerCase
      log.error("columnStr: %s , columnType: %s ".format(columnStr,columnType))
      if("RSA".equals(columnType) && fieldStr.contains(columnStr)){
        val plainField = s"t.`${columnStr}`"
        val encryptedField = s" stringEncrypt( t.`${columnStr}` ) as ${columnStr}"
        // BUG FIX: replaceAll interpreted the field as a regex; use a literal
        // replacement so column names with metacharacters cannot break it
        fieldStr = fieldStr.replace(plainField, encryptedField)
      }
    }

    log.info(s" --- 拼接后的输出表字段顺序： %s --- ".format(fieldStr))

    //注册rsa加密函数
    spark.udf.register("stringEncrypt" ,(content: AnyRef) => {
      val publicKey = broadcast.value._3
      if(null != content) encrypt(publicKey, content.toString) else ""
    })

    // NOTE: an unused `output_write_model match` block was removed here — its
    // result was discarded and it threw scala.MatchError for any value other
    // than exactly "append"/"overwrite"; the case-insensitive check below is
    // the real mode selection.

    //指明插入数据库
    if ("" != output_db_name) {
      spark.sql(s"use ${output_db_name}")
      spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")
      log.info(s" database : ${output_db_name}")
    }

    //现只有Append和OverWrite
    val insertKeyword: String =
      if ("overwrite".equals(output_write_model.toLowerCase)) "insert overwrite table "
      else "insert into table "

    val insertSqlSB = new mutable.StringBuilder(insertKeyword).append(output_table_name)
    if(!"".equals(output_data_partition)){
      insertSqlSB.append(" partition(").append(output_data_partition).append(")")
    }
    insertSqlSB.append(" select ").append(fieldStr).append(" from global_temp.`temptable` t")

    val inputSqlString: String = insertSqlSB.toString()
    log.info(s" --- 执行语句：${inputSqlString} --- ")
    spark.sql(inputSqlString)
  }

  /**
   * 获取hive输出表的表结构 — collects the column list of the target table as
   * a comma-separated "t.`col`" projection string.
   * @param sparkSession the active SparkSession
   * @param hiveDatabaseName database to `use`
   * @param hiveTableName table to describe
   * @param partition unused; kept for interface compatibility
   * @return projection string, e.g. "t.`id`,t.`name`" (empty for no columns)
   */
  def getHiveTableScheme(sparkSession: SparkSession, hiveDatabaseName: String, hiveTableName:String,partition:String): String ={
    //获取分区字段
    val tableColumns = new ListBuffer[String]

    sparkSession.sql(s"use ${hiveDatabaseName}")
    val frame: DataFrame = sparkSession.sql(s"desc ${hiveTableName}")
    frame.show()
    val rows: Array[Row] = frame.select("col_name").collect()
    //重复字段处理——desc 会把partition字段展示出来，这里再获取的时候要处理掉
    // `desc` lists partition columns twice (once in the "# Partition
    // Information" section); toggling membership removes the duplicates
    for (row <- rows) {
      breakable{
        val colName: String = row.get(0).toString
        // skip blank separator lines and "# ..." section headers
        if("".equals(colName) || colName.startsWith("#")){
          break()
        }else{
          if(!tableColumns.contains(colName)){
            tableColumns += colName
          }else {
            tableColumns -= colName
          }
        }
      }
    }

    // mkString avoids the deleteCharAt crash the old code had when the
    // column list came back empty
    tableColumns.map(c => s"t.`${c}`").mkString(",")
  }

  /**
   * 公钥加密 — RSA-encrypts the data with the supplied base64, PKCS#8-encoded
   * key (note: despite the names, the key material is loaded as a PRIVATE key)
   * and returns the ciphertext base64-encoded. Input longer than 117 bytes is
   * processed in 117-byte chunks (the PKCS#1 limit for a 1024-bit key).
   *
   * @param key 加密私钥 (base64 PKCS#8 key)
   * @param data 待加密的数据
   * @return encodeToString 加密后的数据 ("" for null/empty input)
   */
  def encrypt(key: String, data: String): String = {
    if (null == data || "".equals(data)) {
      ""
    } else {
      val keySpec = new PKCS8EncodedKeySpec(Base64.getDecoder.decode(key))
      val rsaKey = KeyFactory.getInstance("RSA").generatePrivate(keySpec)
      val cipher = Cipher.getInstance("RSA")
      cipher.init(Cipher.ENCRYPT_MODE, rsaKey)

      val input = data.getBytes()
      val out = new ByteArrayOutputStream()
      //偏移量 — advance through the input in chunks of at most 117 bytes
      var offset = 0
      while (offset < input.length) {
        val chunkLen = math.min(117, input.length - offset)
        out.write(cipher.doFinal(input, offset, chunkLen))
        offset += chunkLen
      }
      out.close()
      Base64.getEncoder.encodeToString(out.toByteArray)
    }
  }

  /**
   * 将rdd转换成dataframe — takes the first (header + data) string of the RDD,
   * strips the header line and converts the remaining comma-separated lines
   * into a DataFrame with the given schema.
   * @param sparkSession the active SparkSession
   * @param ftpRDD (path, content) pairs; only the first content is used
   * @param schema schema applied to the resulting rows
   * @return DataFrame
   */
  def ftpDataToDataFrame(sparkSession: SparkSession, ftpRDD:RDD[(String,String)], schema: StructType): DataFrame = {

    //形成dataframe应该有格式转换甚至etl过程,此处为测试数据的转化方式
    val raw: String = ftpRDD.map(_._2).collect()(0)
    // everything after the first newline is data; the header is dropped
    val headerEnd: Int = raw.indexOf("\n")
    val body: String = raw.substring(headerEnd + 1, raw.length)
    val rowRDD: RDD[Row] = sparkSession.sparkContext
      .parallelize(body.split("\n"))
      .map(line => Row.fromSeq(line.split(",").toSeq))
    sparkSession.createDataFrame(rowRDD, schema)
  }

  /**
   * 获取FtpClient方法 — thin wrapper around FtpUtil.connectFtp.
   * @return a connected FtpClient
   */
  def getFtpClient(host: String, port: String, user: String, passwd: String): FtpClient =
    FtpUtil.connectFtp(host, port.toInt, user, passwd)

  /**
   * 用不上,wholeTestFile读取过来都是String,只匹配StringType 需要的话得先将数据类型从string -> 其他
   * 再进行匹配对应的类型 int -> IntegerType
   *
   * Maps a type-name string to the corresponding Spark SQL DataType.
   * Unknown names still fail with a MatchError, as before.
   * @param str type name, e.g. "int", "double"
   * @return the matching Spark DataType
   */
  def matchType(str: String): DataType = {
    str match {
      // accept the lowercase spelling too, consistent with the other cases
      case "String" | "string" => StringType
      case "int" => IntegerType
      case "float" => FloatType
      case "boolean" => BooleanType
      case "double" => DoubleType
      case "long" => LongType
      case "date" => DateType
      case "short" => ShortType
      // "timestampe" was a typo in the original mapping; it is kept for
      // backward compatibility alongside the correct spelling
      case "timestamp" | "timestampe" => TimestampType
    }
  }

  /**
   * 获取sapCsvData 的schmea（默认第一行）— reads the header (first line of the
   * first entry) and builds an all-StringType, nullable schema from it.
   * @param spark the active SparkSession (unused; kept for compatibility)
   * @param ftpSchemaAndData raw file contents; element 0 starts with the header
   * @return StructType
   */
  def getFtpDataSchema(spark:SparkSession,ftpSchemaAndData: Array[String]): StructType = {

    //取出schema列
    val header: String = ftpSchemaAndData(0).split("\n")(0)
    val columns: Array[String] = header.split(",")
    log.info(s" --- schema : ${columns.mkString(",")} --- ")
    StructType(columns.map(name => StructField(name, StringType, true)))
  }

  /**
   * 获取SparkSession — builds (or reuses) a Hive-enabled session with
   * speculation disabled.
   * @return the shared SparkSession
   */
  def getSparkSession():SparkSession =
    SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName} ")
      .enableHiveSupport()
      .config("spark.speculation", false)
      //设置源数据信息地址/user/hive/warehouse/
      //.config("spark.sql.warehouse.dir", "hdfs://bigdata-test-2:8020/user/hive/warehouse/")
      .config("spark.debug.maxToStringFields", "300")
      .getOrCreate()

  /**
   * 检测 table 是否有字段加密 — queries the metadata database for fields of
   * the involved tables whose encryption style is set.
   * @param spark sparkSession
   * @param map json inner map (may carry "allTableIds")
   * @return field name -> encryption type (e.g. "RSA"); empty when none
   */
  def checkTableEncryption(spark: SparkSession, map:Map[String,AnyRef]): mutable.Map[String, String] ={

    val columnMap: mutable.Map[String, String] = mutable.Map[String,String]()

    val driver: String = PropertiesUtils.getValue(this.properties,"driver")
    val userName: String = PropertiesUtils.getValue(this.properties,"username")
    val passwd: String = PropertiesUtils.getValue(this.properties,"password")
    val url: String = PropertiesUtils.getValue(this.properties,"mysqlUrl")

    val properties = new Properties()
    properties.put("driver", driver)
    properties.put("user", userName)
    properties.put("url", url)
    properties.put("password", passwd)

    // fall back to table id "1" when the config does not carry the ids; the
    // old catch-all try/catch around Option.get achieved this via exception
    val allTableIds: String = map.get("allTableIds").map(_.toString).getOrElse("1")

    // NOTE(review): allTableIds is interpolated straight into the SQL below.
    // It comes from the job config json — confirm it can never carry
    // user-controlled input, otherwise this is an injection vector.
    val tableSql = "SELECT " +
      "a.table_id AS tableId, " +
      "c.db_name AS dbName, " +
      "c.table_name AS tableName, " +
      "a.field_name, " +
      "a.field_alias AS fieldAlias, " +
      "a.field_type AS fieldType, " +
      "IFNULL(b.permiss_name,'不加密') AS permissName " +
      "  FROM " +
      "bdp_table_field AS a " +
      "LEFT JOIN bdp_table_info AS c ON c.id = a.table_id " +
      "LEFT JOIN bdp_data_permission_param AS b ON a.ency_style_id = b.id " +
      s"WHERE c.id IN (${allTableIds}) " +
      "AND b.permiss_name != '不加密' "

    val df = spark.read.jdbc(url, s"(${tableSql}) as m",properties)

    if(!df.isEmpty){
      val rows: Array[Row] = df.select("field_name","permissName").collect()
      for (row <- rows) {
        columnMap.put(row.get(0).toString, row.get(1).toString)
      }
    }
    columnMap
  }
}