package cn.getech.data.development.job

import java.io.ByteArrayOutputStream
import java.security.KeyFactory
import java.security.spec.{PKCS8EncodedKeySpec, X509EncodedKeySpec}
import java.util.{Base64, Properties}

import cn.getech.data.development.job.util.{JdbcUtil, ParseDateTime, PropertiesUtils, RangerRestUtil}
import javax.crypto.Cipher
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks.{break, breakable}

/**
 * Spark batch job: extracts rows from a relational database via JDBC and
 * loads them into a Hive table, RSA-encrypting configured columns on the way.
 *
 * Invoked as: DBData2Hive <jobId> <time>
 */
object DBData2Hive {

  private val log = LoggerFactory.getLogger(this.getClass)
  // shared job settings loaded from common.properties on the classpath
  private val properties: Properties = PropertiesUtils.getProperties("common.properties")
  // Base64 key material fed to the stringEncrypt UDF.
  // NOTE(review): named "publicKey" but encrypt() parses it as a PKCS#8
  // PRIVATE key — confirm the property really holds a private key.
  private val publicKey = PropertiesUtils.getValue(properties,"inputPublicKey")
  // Ranger REST endpoint used for the write-permission check
  private val rangerUrl = PropertiesUtils.getValue(properties,"rangerUrl")
  private val rangerUtil: RangerRestUtil = new RangerRestUtil(rangerUrl)

  /**
   * Job entry point.
   *
   * @param args args(0) = jobId (selects the HDFS JSON config file),
   *             args(1) = partition time used to resolve date placeholders
   * @throws RuntimeException when the config is incomplete or the user lacks
   *                          write permission on the target table
   */
  def main(args: Array[String]): Unit = {

    // both jobId and time are mandatory
    if (args.length < 2) {
      println("At least need two parameter <jobId> <time> ")
      System.exit(2)
    }

    val jobId = args(0)
    val time = args(1)

    // per-job JSON config dropped on HDFS by the platform
    val path = "hdfs:///bdp/jobconfig/1/jobid_" + jobId + ".json"
    log.info(s"- - - config dir :${path}")

    val spark: SparkSession = getSparkSession()
    val map: Map[String, String] = readConfig(spark, path)

    // validate required config keys
    val isCorrect: Boolean = checkJobConfig(map)
    // validate the user's write permission on the output table (Ranger)
    val isOwner = checkUserPolicy(map, rangerUrl)

    // FIX: the original reported ANY failure — including an incomplete
    // config — with the permission-denied message; report the causes apart.
    if (!isCorrect) {
      throw new RuntimeException(s"Job config ${path} is missing required fields " +
        "(url/user/passwd/driverName/input_input_content/output_table_name/output_db_name)")
    }
    if (!isOwner) {
      throw new RuntimeException(s"You do not have permission to write to the table ${map.getOrElse("output_table_name","")}")
    }

    val broadcast: Broadcast[(Map[String, String], String, String)] =
      spark.sparkContext.broadcast((map, time, publicKey))

    // extract from the source database, then load into Hive
    val frame: DataFrame = sparkReadDBDate(spark, broadcast)
    dfData2hive(spark, frame, broadcast)

    spark.stop()
  }


  /**
   * Verifies that every configuration key the job cannot run without is
   * present in the parsed config map.
   *
   * Only key PRESENCE is checked — empty-string values still pass, matching
   * the historical behavior of this job.
   *
   * @param map job configuration parsed from the HDFS JSON file
   * @return true when all required keys exist
   */
  def checkJobConfig(map: Map[String, String]): Boolean = {
    // source-connection keys followed by the Hive sink keys
    val requiredKeys = Seq(
      "url",
      "user",
      "passwd",
      "driverName",
      "input_input_content",
      "output_table_name",
      "output_db_name")

    requiredKeys.forall(map.contains)
  }

  /**
   * Asks Ranger whether the job's user holds a policy on the output table.
   *
   * NOTE(review): the `rangerUrl` parameter is not used here — the REST
   * client is built once from the object-level field; confirm callers expect
   * that.
   *
   * @param map       job configuration (rangerUserName / output_db_name / output_table_name)
   * @param rangerUrl ranger endpoint (currently unused by this method)
   * @return true when the user has permission, false otherwise
   */
  def checkUserPolicy(map: Map[String, String], rangerUrl: String): Boolean = {
    val user = map.getOrElse("rangerUserName", "hdfs")
    val table = map.getOrElse("output_table_name", "")
    val db = map.getOrElse("output_db_name", "")

    log.info(s"- - - user:${user},targetDB:${db},targetTable:${table}")

    rangerUtil.queryPermissionByUserAndTable(user, db, table)
  }

  /**
   * Builds (or reuses) the Hive-enabled SparkSession for this job.
   *
   * @return the singleton SparkSession
   */
  def getSparkSession(): SparkSession =
    SparkSession
      .builder()
      .appName("DBDate2Hive")
      .config("spark.debug.maxToStringFields", "200")
      .enableHiveSupport()
      .getOrCreate()

  /**
   * Reads the source data from the relational database via JDBC.
   *
   * For MySQL sources the read is split into 10 partitions using
   * `LIMIT offset, count` predicates; other databases are read unpartitioned.
   *
   * @param spark     SparkSession
   * @param broadcast (job config, partition time, key material) broadcast
   * @return DataFrame holding the result of the configured extraction SQL
   */
  def sparkReadDBDate(spark: SparkSession, broadcast: Broadcast[(Map[String, String], String, String)]): DataFrame = {

    val map: Map[String, String] = broadcast.value._1
    val time: String = broadcast.value._2

    val url: String = map.getOrElse("url", "")
    val user: String = map.getOrElse("user", "")

    // source db/table names (logging only)
    val input_db_name: String = map.getOrElse("input_db_name", "")
    val input_table_name: String = map.getOrElse("input_table_name", "")
    // extraction SQL with date placeholders resolved against the job time
    val input_input_content: String = ParseDateTime.replaceDateTime(map.getOrElse("input_input_content", ""), time)
    val properties: Properties = dbConnProperties(map)

    log.info("- - - url: %s ".format(url))
    log.info("time: %s ".format(time))
    log.info("username: %s ".format(user))
    log.info("database: %s ".format(input_db_name))
    log.info("dbname: %s ".format(input_table_name))
    log.info("input_input_content: %s ".format(input_input_content))

    // a semicolon would terminate the wrapped sub-select
    val dbsql = input_input_content.replaceAll(";", " ")

    // TODO paging predicates for sqlserver / postgres / oracle
    if (url.startsWith("jdbc:mysql://")) {
      // FIX: MySQL `LIMIT offset, count` takes the row COUNT as the second
      // argument; the original passed 200000 * index there, so partition
      // ranges overlapped and rows were ingested multiple times. The count
      // must be the fixed page size.
      // NOTE(review): this still caps ingestion at 10 * pageSize = 2,000,000
      // rows — confirm sources never exceed that.
      val pageSize = 200000
      val predicates: Array[String] = (1 to 10).map { index =>
        s" 1 = 1 limit ${pageSize * (index - 1)} , ${pageSize}"
      }.toArray

      spark.read.jdbc(url, s"(${dbsql}) as m", predicates, properties)
    } else {
      spark.read.jdbc(url, s"(${dbsql}) as m", properties)
    }
  }

  /**
   * Writes the extracted DataFrame into the configured Hive table.
   *
   * Column order is taken from `desc <table>`; string columns get CR/LF
   * stripped, and columns flagged by checkTableEncryption are wrapped in the
   * `stringEncrypt` RSA UDF. The insert is executed as Hive SQL over a
   * global temp view.
   *
   * @param spark     SparkSession
   * @param dataFrame source data read from the relational database
   * @param broadcast (job config, partition time, key material) broadcast
   * @throws RuntimeException when no writable columns can be resolved
   */
  def dfData2hive(spark: SparkSession, dataFrame: DataFrame, broadcast: Broadcast[(Map[String, String], String, String)]): Unit = {
    val map: Map[String, String] = broadcast.value._1
    val time: String = broadcast.value._2

    val output_write_model: String = map.getOrElse("output_write_model", "")
    val output_table_name: String = map.getOrElse("output_table_name", "")
    val output_db_name: String = map.getOrElse("output_db_name", "")
    // partition spec with date placeholders resolved against the job time
    val output_data_partition: String = ParseDateTime.replaceDateTime(map.getOrElse("output_data_partition", ""), time)
    // desired number of output files per write
    val hight_file_num: String = map.getOrElse("hight_file_num", "1")

    var fieldStr: String = ""

    // FIX: DataFrames are immutable — the original called coalesce and
    // discarded the result, so the configured file count never took effect.
    val outputDF: DataFrame = dataFrame.coalesce(hight_file_num.toInt)

    // FIX: log the row count via count() at info level; the original used
    // collect().length at error level, pulling the whole result set onto the
    // driver just to log its size.
    log.info(s" --- 读取的mysql数据条数：${outputDF.count()} --- ")
    log.info(s" --- mode: ${output_write_model} --- ")
    log.info(s" --- partition: ${output_data_partition} --- ")
    log.info(s" --- database: ${output_db_name} --- ")
    log.info(s" --- tableName: ${output_table_name} --- ")

    // build the select projection in the target table's column order
    if ("" != output_db_name) {
      val fieldsSB = new mutable.StringBuilder()

      spark.sql(s"use ${output_db_name}")
      spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")

      log.info(s" --- database_dbName : ${output_db_name} --- ")
      log.info(s" --- database_tableName : ${output_table_name} --- ")

      val frame: DataFrame = spark.sql(s"desc ${output_table_name}")

      // columns of the target table that must be RSA-encrypted on write
      val rsaColumnMap: mutable.Map[String, String] = checkTableEncryption(spark, map)

      // target-table columns in declaration order, minus partition columns
      val hiveTableColumns: ListBuffer[String] = mutable.ListBuffer[String]()
      val hiveTableColMap: mutable.Map[String, String] = mutable.Map[String, String]()

      if (!frame.isEmpty) {
        val hiveTableRow: Array[Row] = frame.select("col_name", "data_type").collect()
        for (row <- hiveTableRow) {
          val columnStr: String = row.get(0).toString
          val columnType: String = row.get(1).toString
          if (!hiveTableColMap.contains(columnStr)) {
            hiveTableColMap.put(columnStr, columnType)
            hiveTableColumns += columnStr
          } else {
            // `desc` lists partition columns a second time in its partition
            // section — a repeated name is a partition column, drop it
            hiveTableColMap.remove(columnStr)
            hiveTableColumns -= columnStr
          }
        }
      }

      for (hiveTableColumn <- hiveTableColumns) {
        val columnType: String = hiveTableColMap.getOrElse(hiveTableColumn, "")
        val isRsa: String = rsaColumnMap.getOrElse(hiveTableColumn, "")

        if (!"".equals(columnType)) {
          val isString = "string".equals(columnType.toLowerCase)
          val needsRsa = "rsa".equals(isRsa.toLowerCase)

          if (isString && needsRsa) {
            // string + encrypted: RSA-encrypt, then strip CR/LF
            fieldsSB.append(" regexp_replace(").append(" stringEncrypt( t.`").append(hiveTableColumn).append("` ), '[\\r\\n]', '') as ").append(hiveTableColumn).append(" ,")
          } else if (needsRsa) {
            // non-string + encrypted: RSA-encrypt only
            fieldsSB.append(" stringEncrypt( t.`").append(hiveTableColumn).append("` ) as ").append(hiveTableColumn).append(" ,")
          } else if (isString) {
            // plain string: strip CR/LF only
            fieldsSB.append(" regexp_replace(").append(hiveTableColumn).append(" ,'[\\r\\n]' ,'') as ").append(hiveTableColumn).append(" ,")
          } else {
            // plain non-string: pass through
            fieldsSB.append("t.`").append(hiveTableColumn).append("` ,")
          }
        }
      }

      // FIX: an empty builder previously threw a bare
      // StringIndexOutOfBoundsException from deleteCharAt; fail with context.
      if (fieldsSB.isEmpty) {
        throw new RuntimeException(s"No writable columns resolved for ${output_db_name}.${output_table_name}")
      }
      fieldStr = fieldsSB.deleteCharAt(fieldsSB.length - 1).toString()
    }

    log.info(s" --- 拼接后的输出表字段顺序： ${fieldStr} --- ")
    // SECURITY FIX: the original logged the broadcast key material here —
    // secrets must not appear in job logs.

    // register the RSA-encrypt UDF referenced by the projection above
    spark.udf.register("stringEncrypt", (content: AnyRef) => {
      val publicKey = broadcast.value._3
      if (null != content) encrypt(publicKey, content.toString) else ""
    })

    outputDF.createOrReplaceGlobalTempView("tempTable")
    outputDF.printSchema()

    // assemble the insert statement: into vs overwrite, optional partition
    val insertSqlSB = new mutable.StringBuilder()
    if ("overwrite".equals(output_write_model.toLowerCase)) {
      insertSqlSB.append("insert overwrite table ").append(output_table_name)
    } else {
      insertSqlSB.append("insert into table ").append(output_table_name)
    }
    if (!"".equals(output_data_partition)) {
      insertSqlSB.append(" partition(").append(output_data_partition).append(")")
    }
    insertSqlSB.append(" select ").append(fieldStr).append(" from `global_temp`.`tempTable` t")

    val inputSqlString: String = insertSqlSB.toString()
    log.info(s" --- 执行语句：${inputSqlString} --- ")

    spark.sql(inputSqlString)
  }

  /**
   * RSA-encrypts a string and returns the Base64-encoded ciphertext.
   *
   * The plaintext is split into 117-byte blocks, each encrypted separately,
   * and the cipher blocks are concatenated before encoding.
   * NOTE(review): 117 = 128 - 11, i.e. a 1024-bit key with PKCS#1 padding —
   * assumes all keys are 1024 bit, confirm. Also, despite the caller-side
   * name "publicKey", the key is parsed as a PKCS#8 PRIVATE key.
   *
   * @param key  Base64-encoded PKCS#8 RSA private key
   * @param data plaintext to encrypt; null or "" yields ""
   * @return Base64 ciphertext, or "" for null/empty input
   */
  def encrypt(key: String, data: String): String = {
    if (null == data || "".equals(data)) {
      ""
    } else {
      // rebuild the RSA key from its Base64 PKCS#8 encoding
      val keySpec = new PKCS8EncodedKeySpec(Base64.getDecoder.decode(key))
      val rsaKey = KeyFactory.getInstance("RSA").generatePrivate(keySpec)

      val cipher = Cipher.getInstance("RSA")
      cipher.init(Cipher.ENCRYPT_MODE, rsaKey)

      // encrypt block-by-block and concatenate the cipher blocks in order
      val cipherBytes: Array[Byte] = data
        .getBytes()
        .grouped(117)
        .flatMap(block => cipher.doFinal(block))
        .toArray

      Base64.getEncoder.encodeToString(cipherBytes)
    }
  }

  /**
   * Loads the job configuration from a JSON file on HDFS.
   *
   * The file is expected to contain a single record; only the first row is
   * read and flattened into a field-name -> value map.
   *
   * @param spark SparkSession used to read the JSON
   * @param path  HDFS path of the per-job config file
   * @return config map keyed by JSON field name
   */
  def readConfig(spark: SparkSession, path: String): Map[String, String] = {
    val firstRow: Row = spark.read.json(path).first()
    firstRow.getValuesMap[String](firstRow.schema.fieldNames)
  }

  /**
   * Builds the JDBC connection Properties for the source database.
   *
   * The configured password is stored RSA-encrypted and is decrypted here
   * before being handed to the JDBC driver.
   *
   * @param map job configuration (driverName / user / url / passwd)
   * @return Properties with driver, user, url and the decrypted password
   */
  def dbConnProperties(map: Map[String, String]): Properties = {
    val connProperties = new Properties()

    connProperties.put("driver", map.getOrElse("driverName", ""))
    connProperties.put("user", map.getOrElse("user", ""))
    connProperties.put("url", map.getOrElse("url", ""))
    // the config carries the password encrypted; decrypt for the driver
    connProperties.put("password", JdbcUtil.rsaDecrypt(map.getOrElse("passwd", "")))

    connProperties
  }

  /**
   * Queries the platform metadata database for columns of the configured
   * table(s) that are flagged for field-level encryption.
   *
   * @param spark SparkSession used to run the metadata query over JDBC
   * @param map   job config; "allTableIds" narrows the lookup (defaults to "1")
   * @return mutable map of field name -> encryption style name (e.g. "rsa");
   *         empty when no encrypted columns are configured
   */
  def checkTableEncryption(spark: SparkSession, map:Map[String,String]): mutable.Map[String, String] ={

    val columnMap: mutable.Map[String, String] = mutable.Map[String,String]()

    // connection settings for the metadata store come from common.properties
    val driver: String = PropertiesUtils.getValue(this.properties,"driver")
    val userName: String = PropertiesUtils.getValue(this.properties,"username")
    val passwd: String = PropertiesUtils.getValue(this.properties,"password")
    val url: String = PropertiesUtils.getValue(this.properties,"mysqlUrl")

    val properties = new Properties()
    properties.put("driver", driver)
    properties.put("user", userName)
    properties.put("url", url)
    properties.put("password", passwd)

    val allTableIds: String = map.getOrElse("allTableIds","1")

    // Joins field metadata to its table and encryption-style rows.
    // NOTE(review): allTableIds is interpolated directly into the SQL —
    // injection risk if the job config is attacker-controlled; prefer
    // validating it as a comma-separated list of integers.
    // NOTE(review): the WHERE filter on b.permiss_name discards rows where
    // the LEFT JOIN found no match, so the IFNULL default never applies.
    val tableSql = "SELECT " +
      "a.table_id AS tableId, " +
      "c.db_name AS dbName, " +
      "c.table_name AS tableName, " +
      "a.field_name, " +
      "a.field_alias AS fieldAlias, " +
      "a.field_type AS fieldType, " +
      "IFNULL(b.permiss_name,'不加密') AS permissName " +
      "  FROM " +
      "bdp_table_field AS a " +
      "LEFT JOIN bdp_table_info AS c ON c.id = a.table_id " +
      "LEFT JOIN bdp_data_permission_param AS b ON a.ency_style_id = b.id " +
      s"WHERE c.id IN ( ${allTableIds} ) " +
      "AND b.permiss_name != '不加密' "

    val df = spark.read.jdbc(url, s"(${tableSql}) as m",properties)
    val empty: Boolean = df.isEmpty

    // collect field -> permission name pairs; later entries overwrite earlier
    // ones for a repeated field name
    if(!empty){
      val rows: Array[Row] = df.select("field_name","permissName").collect()
      for (row <- rows) {
        val columnStr: String = row.get(0).toString
        val columnPer: String = row.get(1).toString
        columnMap.put(columnStr,columnPer)
      }
    }
    columnMap
  }
}