package com.kingsoft.dc.khaos.module.spark.preprocess.specific.quality

import com.kingsoft.dc.khaos.util.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._

/**
  * Uniqueness check: identifies rows whose value in a given column is not
  * unique within the DataFrame.
  */
class SingleCheck extends Logging with Serializable {

  /**
    * Returns every row of `dataFrame` whose value in column `field` occurs
    * more than once, ordered by that column so duplicates appear grouped.
    *
    * Matching is done on the single-column `Row.toString` rendering
    * (`"[value]"`), which is compared against `s"[${row.getAs(field)}]"`.
    * NOTE(review): a null field renders as "[null]" on both sides, so null
    * duplicates are matched too — confirm this is the intended semantics.
    *
    * @param dataFrame input data to check
    * @param field     name of the column whose values must be unique
    * @return rows carrying duplicated `field` values, sorted by `field`
    */
  def analysisData(dataFrame: DataFrame, field: String): DataFrame = {
    // Count occurrences of each value of `field` and keep only the values
    // seen more than once. `line.toString()` on a one-column Row yields the
    // "[value]" form used for matching below.
    val duplicatedKeys: RDD[String] = dataFrame.select(field)
      .rdd
      .map(line => (line.toString(), 1))
      .reduceByKey(_ + _)
      .filter(_._2 > 1)
      .keys
      .distinct()

    // Materialize once with collect(): the previous take(count().toInt)
    // launched two Spark jobs and could overflow Int for very large counts.
    // A Set gives O(1) membership tests inside the per-row filter below.
    val duplicatedSet: Set[String] = duplicatedKeys.collect().toSet

    // Keep only the rows whose field value belongs to the duplicated set.
    val duplicatedRows = dataFrame.filter(row => duplicatedSet.contains(s"[${row.getAs(field)}]"))

    // Sort so rows sharing the same duplicated value end up adjacent.
    duplicatedRows.orderBy(field)
  }
}

