package cn.lecosa.util

import org.apache.commons.lang3.StringUtils
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{Row, SparkSession}

import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.util.control.NonFatal

/**
  * Standardization ("bzh") batch job: reads rows from a source Hive table,
  * normalizes code-valued columns through broadcast code/name lookup maps,
  * serializes every row into a single SOH(0x01)-separated text column and
  * writes the result out as a text dataset.
  *
  * @author administrator
  * @date 2019/1/2
  */
object Hive2HiveBzh {
  // Silence noisy Spark/Hadoop logging; keep only errors.
  Logger.getLogger("org.apache").setLevel(Level.ERROR)
  val log = Logger.getLogger("Hive2HiveBzh")

  def main(args: Array[String]): Unit = {
    log.info("标准化任务开始***********************************************")
    // Table metadata holder plus the Hive-enabled SparkSession.
    val (bzhProperty, sparksession) = getSparkSession(args)
    val dmService = DmService
    // Broadcast the code-value and code-name lookup tables to all executors.
    val dmValue_bc = sparksession.sparkContext.broadcast(dmService.getalldm)
    val mcValue_bc = sparksession.sparkContext.broadcast(dmService.getalldmvalue)

    val lastTime = bzhProperty.getLasttime()
    val primarykey = bzhProperty.getPrimaryKey()

    if (primarykey.isEmpty) {
      println("查询不到主属性，程序退出")
      System.exit(0)
    }

    // BUG FIX: the original used `eq` (reference equality), which is almost
    // always false for two distinct String instances, so this guard could
    // never fire. `==` performs value equality in Scala.
    if ("ml_qszxsj" == lastTime) {
      println("查询不到时间字段，程序退出")
      System.exit(0)
    }
    // Fetch remaining table info. The values are unused below, but the calls
    // may have side effects inside BzkProperty, so they are kept.
    val bmysb = bzhProperty.getBmys().get
    val addPartitionSql = bzhProperty.addPartition()

    // Create the target Hive table if it does not exist; no-op otherwise.
    val createHiveTablDDl = bzhProperty.createHiveDDL()
    println(s"创建hive表语句：${createHiveTablDDl}")
    sparksession.sql(createHiveTablDDl)
    sparksession.sql(bzhProperty.alterTableNull())

    // Source query; na.fill("") replaces NULLs so downstream string handling
    // never sees null in string columns.
    val querySql = bzhProperty.getQuerySql()
    println(s"查询语句: ${querySql}")
    val dataFrame = sparksession.sql(querySql).na.fill("")

    // NOTE(review): count() triggers a full scan and the data is read again
    // when convertRdd is materialized (dataFrame itself is not cached).
    val frameCount = dataFrame.count()
    if (frameCount == 0) {
      println("没有查询到数据，程序退出")
      System.exit(0)
    }

    // (columnName, dataType) pairs of the query schema.
    val tuples = dataFrame.schema.map(x => (x.name, x.dataType))

    // Attach the datatype info to the original data-item descriptors.
    bzhProperty.fillSjxlist(tuples)
    // Broadcast conversion metadata used inside the row-mapping closure.
    val sjxlist_bc = sparksession.sparkContext.broadcast(bzhProperty.getsjxList)
    val schenma_bc = sparksession.sparkContext.broadcast(bzhProperty.getSchema)
    val saveMode_bc = sparksession.sparkContext.broadcast(bzhProperty.getSaveMode)
    val savePath_bc = sparksession.sparkContext.broadcast(bzhProperty.getSavePath)
    val primaryKey_bc = sparksession.sparkContext.broadcast(primarykey)
    val lasttime_bc = sparksession.sparkContext.broadcast(lastTime)

    // Roughly 200k rows per partition for the conversion stage.
    val partitionsNum = ((frameCount / (20 * 10000)) + 1).toInt
    val convertRdd = dataFrame.rdd.repartition(partitionsNum).map(row => {
      val strbuffer = new StringBuffer
      // Field separator: the single byte 0x01 (Hive's default delimiter).
      val bytes: Array[Byte] = Array[Byte](1)
      val separator: String = new String(bytes, "GBK")
      // Standardized-name -> raw-value map prepared for a future SpringEL
      // based standardization; currently unused (see TODO below).
      val allValue = new java.util.HashMap[String, Object]()
      sjxlist_bc.value.foreach(x => {
        val value: java.lang.Object = row.getAs(x.yssjxmc)
        allValue.put(x.bzhsjxmc.toUpperCase, value)
      })

      // Error indicator accumulated while standardizing this row:
      // +100000 per unresolved code lookup; forced to 10000000 when a
      // numeric conversion fails (original semantics preserved).
      var mlsfcg: Int = 0
      sjxlist_bc.value.foreach(x => {
        var tmpValue: Any = row.getAs(x.yssjxmc)

        // Append a parsed numeric value followed by the separator; on parse
        // failure mark the row as failed and append a single space WITHOUT a
        // separator (matching the original behavior). NonFatal replaces the
        // original bare `case _`, which also swallowed fatal errors.
        def appendOrFail(parse: String => Any): Unit =
          try {
            strbuffer.append(parse(StringUtils.trimToEmpty(tmpValue.toString)).toString).append(separator)
          } catch {
            case NonFatal(_) =>
              mlsfcg = 10000000
              strbuffer.append(" ")
          }

        if (!"".equals(StringUtils.trimToEmpty(x.nrbzh))) {
          // TODO replace this code-table lookup with the SpringEL path.
          if (x.nrbzh.startsWith("dm")) {
            val tmpstr = StringUtils.trimToEmpty(tmpValue.toString)
            if (x.bzhsjxmc.endsWith("_999mc")) {
              // "_999mc" columns carry the code *name*.
              mcValue_bc.value.get(x.nrbzh + "_" + tmpstr) match {
                case Some(v) => tmpValue = v
                case None =>
                  // Lookup miss: keep the raw code and bump the indicator.
                  tmpValue = tmpstr
                  mlsfcg += 100000
              }
            } else {
              // Other "dm" columns carry the code *value*.
              dmValue_bc.value.get(x.nrbzh + "_" + tmpstr) match {
                case Some(v) => tmpValue = v
                case None =>
                  tmpValue = tmpstr
                  mlsfcg += 100000
              }
            }
          }
        }

        /** Serialize this field into the text record. */
        if (tmpValue != null) {
          if ("BIGINT".equalsIgnoreCase(x.bzhsjlx)) {
            appendOrFail(_.toLong)
          } else if ("DOUBLE".equalsIgnoreCase(x.bzhsjlx)) {
            appendOrFail(_.toDouble)
          } else if ("INT".equalsIgnoreCase(x.bzhsjlx)) {
            appendOrFail(_.toInt)
          } else if ("DECIMAL".equalsIgnoreCase(x.bzhsjlx)) {
            appendOrFail(s => scala.math.BigDecimal(s))
          } else {
            // Code-standardized fields ("ysdmToBzhdm") carry "code_name";
            // emit both parts as two separate columns.
            if (StringUtils.isNotEmpty(x.nrbzh) && x.nrbzh.contains("ysdmToBzhdm")) {
              val codeArr = tmpValue.toString.split("_")
              strbuffer.append(codeArr(0)).append(separator)
              strbuffer.append(codeArr(1)).append(separator)
            } else {
              strbuffer.append(StringUtils.trimToEmpty(tmpValue.toString)).append(separator)
            }
          }
        } else {
          // NULL field: a single space, no separator (original behavior).
          strbuffer.append(" ")
        }
      })
      // Trailing columns: error indicator and processing timestamp.
      strbuffer.append(mlsfcg).append(separator)
      strbuffer.append(System.currentTimeMillis())
      // The whole record becomes one string column of the output Row.
      Row(strbuffer.toString)
    }).cache()

    // Roughly 880k rows per output partition.
    val repartitionNum = ((frameCount / (88 * 10000)) + 1).toInt
    val schma = StructType(Array(StructField("row", StringType)))
    sparksession.createDataFrame(convertRdd, schma).repartition(repartitionNum)
      .write.mode(saveMode_bc.value)
      .text(savePath_bc.value)

    sparksession.stop()
  }

  /**
    * Builds the job configuration holder and a Hive-enabled SparkSession
    * from the command-line arguments.
    *
    * @param args (0) path of the properties file,
    *             (1) source schema name (yskm),
    *             (2) source table name (ysbm)
    * @return (BzkProperty configured with yskm/ysbm, SparkSession)
    */
  private def getSparkSession(args: Array[String]) = {
    val paramsPath = args(0)
    val yskm = args(1)
    val ysbm = args(2)

    // Load the runtime configuration file.
    val params = PropertyUtil.getProperties(paramsPath)

    val sparkConf = new SparkConf()

    // Properties prefixed with "<ClassName>.SparkConf." are forwarded to
    // SparkConf with the prefix stripped.
    val sparkConfPrefix = this.getClass.getSimpleName.split("\\$")(0) + ".SparkConf"
    params.foreach(spc => {
      if (spc._1.startsWith(sparkConfPrefix)) {
        sparkConf.set(spc._1.substring(sparkConfPrefix.length() + 1, spc._1.length()), spc._2)
      }
    })

    sparkConf.getAll.foreach(println)

    val spark = SparkSession
      .builder
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    // Keep Hive's own parquet serde instead of Spark's native reader.
    spark.sqlContext.setConf("spark.sql.hive.convertMetastoreParquet", "false")

    // Initialize the property holder with the source schema/table names.
    val bzhproperty = BzkProperty
    bzhproperty.yskm = yskm
    bzhproperty.ysbm = ysbm

    (bzhproperty, spark)
  }

}
