package com.feidee.fd.sml.algorithm.util

import org.apache.spark.sql.DataFrame

/**
  * @Author songhaicheng
  * @Date 2019/5/15 15:02
  * @Description
  * @Reviewer
  */
object DataFrameUtils {

  /**
    * Collects every value of the given column as strings.
    *
    * @param data source DataFrame
    * @param col  name of the column to extract
    * @return all column values (duplicates included), stringified, gathered to the driver
    */
  def getColumnValues(data: DataFrame, col: String): Array[String] = {
    // collect() already gathers every partition's elements to the driver; the previous
    // treeAggregate with `Array :+` copied the accumulator on every element (O(n^2) per
    // partition) only to reproduce what collect() does directly.
    // NOTE(review): a null cell still throws NPE in .toString, same as before — confirm
    // upstream guarantees non-null values for this column.
    data.select(col).rdd.map(_.get(0).toString).collect()
  }

  /**
    * Collects the distinct values of the given column as strings.
    *
    * @param data source DataFrame
    * @param col  name of the column to extract
    * @return the distinct column values, stringified, gathered to the driver
    */
  def getColumnDistinctValues(data: DataFrame, col: String): Array[String] = {
    // RDD.distinct() deduplicates in parallel across the cluster (shuffle-based), replacing
    // the previous per-partition linear `contains` scan (O(n^2) per partition) that still
    // required a final driver-side .distinct to catch cross-partition duplicates.
    data.select(col).rdd.map(_.get(0).toString).distinct().collect()
  }

}
