//package com.qingguo.MD5.verifyMd5
//
//import org.apache.hadoop.conf.Configuration
//import org.apache.hadoop.fs.{FileSystem, Path}
//import org.apache.spark.sql.{SaveMode, SparkSession}
//import verifyMd5.VerifyMd5.Md5Verifier
//
// NOTE(review): this entire object is commented out — it is dead code. The
// notes below only translate/clarify the embedded comments; confirm whether
// the code should be restored or removed from the repository.
//
// Purpose (as written): compare the MD5 of the first N lines of a fixed-length
// HDFS input file against the `md5_coding` column of a Hive table, and write
// any non-matching input lines to `<inputFile>.diff.log` on HDFS.
//object MD5ComparisonForFixedLen {
//  def main(args: Array[String]): Unit = {
//    // Positional CLI arguments:
//    val tableName = args(0) // table name, e.g. SDATA.S66_Z010_ORG
//    val inFileHdfsDir = args(1) // input file (hdfs dir), e.g. /user/edw/tmp/CS_Z010_ORG_20200702.dat
//    val count = args(2)  // number of lines for comparison, e.g. 3
//    var whereExp = ""
//    if (args.length > 3) {
//      var dt = args(3)// optional business date (bizDate), e.g. 20200702
////      yields: where dt = '20200702'
//      whereExp = " where dt = '" + dt + "'"
//    }
//
////    diff log path, e.g. /user/edw/tmp/CS_Z010_ORG_20200702.dat.diff.log
//    val outputDir = new Path(inFileHdfsDir + ".diff.log")
//
//    val conf = new Configuration
//    val hdfs: FileSystem = FileSystem.get(conf)
//    // Delete any pre-existing diff log directory so this run starts clean
//    if (hdfs.exists(outputDir) && hdfs.isDirectory(outputDir)) {
//      hdfs.delete(outputDir, true)
//    }
//
//    val spark = SparkSession.builder()
////      .master("local")
//      .config("spark.sql.sources.partitionOverwriteMode","dynamic")
//      .config("hive.exec.dynamic.partition.mode","nonstrict")
////      .config("spark.sql.shuffle.partitions","20")
//      .appName("MD5ComparisonForFixedLen").enableHiveSupport().getOrCreate()
//
////    Import implicit conversions (needed for rdd.toDF and Dataset.map below)
//    import spark.implicits._
//
//    val (colIdx, colData, colMd5) = ("idx", "data", "md5_coding")
//
//    // DataFrame holding only the md5_coding column of the Hive table
//    val sqlFullData = spark.sql("SELECT * from " + tableName + whereExp).select("md5_coding")
//
//    // Instantiate the MD5 helper (project-local Md5Verifier; the exact
//    // replaceDouble/md5Verify semantics are not visible from this file)
//    val md5 = new Md5Verifier()
//     // hdfs input path, e.g. /user/edw/tmp/CS_Z010_ORG_20200702.dat
//    val inFileRDD = spark.sparkContext.textFile(inFileHdfsDir)
//    val inFileMD5RDD = inFileRDD
//      .zipWithIndex().map(x=>{
////      mkString joins the per-field results back into a single string
//        val data = x._1.split(' ').map(s=>md5.replaceDouble(s)).mkString
////      NOTE(review): md5Verify presumably returns the MD5 digest of `data` — confirm in Md5Verifier
//        val md5Val = md5.md5Verify(data)
////      println("**************************************** md5Val=" + md5Val)
////      tuple layout: (line index 0,1,2..., raw line, md5 value)
//        (x._2, x._1, md5Val)// idx, data, md5_coding
//    })
//
//    // Cap the comparison at `count` lines (or the whole file if smaller).
//    // NOTE(review): count() is truncated via toInt — would misbehave for
//    // inputs with more than Int.MaxValue lines; confirm acceptable here.
//    var limitCnt = inFileMD5RDD.count().toInt
//    if (count.toLong < limitCnt) {
//      limitCnt = count.toInt
//    }
//
////    (colIdx, colData, colMd5) = ("idx", "data", "md5_coding")
//    val inFileMD5DF = inFileMD5RDD.toDF(colIdx, colData, colMd5)
//    // Join on md5_coding (Spark default join type is inner): rows that
//    // survive are input lines whose MD5 exists in the Hive table.
//    val crossData = inFileMD5DF
//      .limit(limitCnt)
//      .join(sqlFullData, colMd5)
//      .map(x=>{
////        val idxes = colNames.map(c=>x.fieldIndex(c))
////        fieldIndex returns the position of the named column within the Row
//        (x.getLong(x.fieldIndex(colIdx)), x.getString(x.fieldIndex(colData)), x.getString(x.fieldIndex(colMd5)))
//      }).toDF(colIdx, colData, colMd5)
////    inFileMD5DF.show()
////    sqlFullData.show()
////    crossData.show()
//    val num = crossData.count()
//    // Fewer matched rows than compared lines => some lines had no MD5 match;
//    // recover those lines (indices minus matched indices), sort by original
//    // line order, and write their raw text to the diff log.
//    if (num != limitCnt) {
//      val diffMd5Rdd = inFileMD5DF.join(
//        inFileMD5DF.limit(limitCnt).select(colIdx).except(crossData.select(colIdx)),
//        colIdx
//      ).rdd
//        .sortBy(x=>x.getLong(x.fieldIndex(colIdx)))
//        .map(x=>x.getString(x.fieldIndex(colData)))
//
////      val schema = StructType(colNames.map(fieldName => StructField(fieldName, StringType, true)))
////      val diffMd5Ds = spark.createDataFrame(diffMd5Rdd, schema)
//      val diffMd5Ds = diffMd5Rdd.toDF()
////      diffMd5Ds.show()
//      diffMd5Ds.write.mode(SaveMode.Overwrite).text(outputDir.toString)
////      System.exit(1)
////    } else {
////      System.exit(0)
//    }
//
//  }
//
//}
//
