import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DataTypes, StructType}
import org.apache.spark.sql.{RelationalGroupedDataset, Row, RowFactory, SparkSession}

object HyperlogDemo {

  /**
   * Demo: extending a DataFrame with extra constant columns two different ways.
   *
   *  1. "f4" is added with `typedLit` inside a `select` (pure DataFrame API).
   *  2. "f5" is added by dropping to the RDD level, appending a value to every
   *     `Row`, and rebuilding a DataFrame against an explicit schema.
   *
   * Runs entirely on a local master and prints the intermediate results.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local").appName("HyperlogDemo").getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    val ds = spark.createDataset(Seq(("a", "bj", 1), ("a", "bj", 2), ("b", "bj", 3), ("c", "sh", 3), ("c", "sh", 3)))
    // Example of HyperLogLog-based approximate distinct count per group:
    // val l: Long = ds.rdd.groupBy(_._2).countApproxDistinct(0.000018)

    // (1) Add constant column "f4" via a typed literal.
    val df = ds.toDF("f1", "f2", "f3").select('f1, 'f2, 'f3, typedLit[Int](4) as "f4")
    df.show(100, truncate = false)

    df.printSchema()

    // (2) Append column "f5" by rebuilding each Row.
    // Row.fromSeq replaces the deprecated Row.merge (deprecated in Spark 3.0).
    val rdd = df.rdd.map(row => Row.fromSeq(row.toSeq :+ 5))
    // Schema must be declared explicitly when creating a DataFrame from RDD[Row].
    val schema = new StructType()
      .add("f1", DataTypes.StringType)
      .add("f2", DataTypes.StringType)
      .add("f3", DataTypes.IntegerType)
      .add("f4", DataTypes.IntegerType)
      .add("f5", DataTypes.IntegerType)
    val df2 = spark.createDataFrame(rdd, schema)
    df2.show(100, truncate = false)

    spark.stop()
  }

}
