package cn.doitedu.dwetl.other

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{DataTypes, StructType}

/**
 * Demo entry point: computes the information entropy (信息熵) of three
 * categorical columns (sex / age / job) of a test CSV. Each categorical
 * column is paired with a pre-computed per-row probability column
 * (sexp / agep / jobp); the aggregation itself is done by the custom
 * aggregator `EntropyUDAF2`, registered as the SQL function `entropy`.
 *
 * NOTE(review): `EntropyUDAF2` is defined elsewhere in the project —
 * presumably an Aggregator/UDAF over (category, probability) pairs that
 * folds them into a single entropy value; confirm against its definition.
 */
object 信息熵 {

  def main(args: Array[String]): Unit = {

    // Fix: appName was an empty string, which makes the job impossible to
    // identify in the Spark UI / history server. local master: demo only.
    val spark = SparkSession.builder().master("local").appName("信息熵").getOrCreate()

    // Explicit schema for the headered test file: each categorical column
    // is immediately followed by its probability column.
    val schema = new StructType()
      .add("sex", DataTypes.StringType)
      .add("sexp", DataTypes.DoubleType)
      .add("age", DataTypes.IntegerType)
      .add("agep", DataTypes.DoubleType)
      .add("job", DataTypes.StringType)
      .add("jobp", DataTypes.DoubleType)
    val df = spark.read.schema(schema).option("header", true).csv("testdata/other/测试数据.txt")

    // Expose the custom entropy aggregator to Spark SQL under the name "entropy".
    spark.udf.register("entropy", EntropyUDAF2)

    df.createTempView("df")
    // Quick sanity check: print the probability columns before aggregating.
    df.select("sexp", "agep", "jobp").show(100, false)

    // One entropy value per categorical column. The probability columns are
    // already DoubleType per the schema, so the casts are redundant but
    // harmless — kept so the SQL works even if the schema changes.
    spark.sql(
      """
        |select
        |entropy(sex,cast(sexp as double)) as sexp_entropy,
        |entropy(age,cast(agep as double)) as agep_entropy,
        |entropy(job,cast(jobp as double)) as jobp_entropy
        |from df
        |
        |""".stripMargin).show(100, false)

    // close() stops the underlying SparkContext.
    spark.close()
  }

}
