package spark

import org.apache.spark.sql.functions.{udf, _}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * @author pinker on 2018/3/14
  *         import java.util.Objects
  *         import java.util
  *         import java.util.function.Consumer
  *         *
  *
  *         object Main { class Tree[T]  (var value: T, var left: Main.Tree[T], var right: Main.Tree[T] ) {
  *         }
  *         object State extends Enumeration  { type State = Value
  *         val T,V = Value
  *         }
  *         private def inorder[T](tree: Main.Tree[T], callback: Consumer[T]): Unit =  { val stack: util.Stack[Tuple2[Main.Tree[T], Main.State]] = new util.Stack[Tuple2[Main.Tree[T], Main.State]]
  *stack.push(Tuple2.of(tree, State.T))
  *         while ( { !(stack.isEmpty)})  { val t: Tuple2[Main.Tree[T], Main.State] = stack.pop
  *         val node: Main.Tree[T] = t._1
  *         val state: Main.State = t._2
  *         if (state eq State.T)  { if (node.right != null)  { stack.push(Tuple2.of(node.right, State.T))}
  *stack.push(Tuple2.of(node, State.V))
  *         if (node.left != null)  { stack.push(Tuple2.of(node.left, State.T))}
  *         continue //todo: continue is not supported
  *         }
  *         if (state eq State.V)  { callback.accept(node.value)
  *         }
  *         }
  *         }
  *         def main(args: Array[String]): Unit =  { val root: Main.Tree[Integer] = new Main.Tree[Integer](4, new Main.Tree[Integer](2, new Main.Tree[Integer](1, null, null), new Main.Tree[Integer](3, null, null)), new Main.Tree[Integer](6, new Main.Tree[Integer](5, null, null), new Main.Tree[Integer](7, null, null)))
  *         inorder(root, System.out.println)
  *         }
  *         }
  *         *
  * @SuppressWarnings (Array("ALL"))  object Tuple2 { def of[T1, T2](_1: T1, _2: T2): Tuple2[T1, T2] =  { return new Tuple2[T1, T2](_1, _2)
  *                   }
  *                   }
  * @SuppressWarnings (Array("ALL"))  class Tuple2[T1, T2] private  (val _1: T1, val _2: T2 ) {
  *                   override def equals(o: Any): Boolean =  { if (this eq o)  { return true}
  *                   if (!((o.isInstanceOf[Tuple2[_, _]])))  { return false}
  *                   val t: Tuple2[_, _] = o.asInstanceOf[Tuple2[_, _]]
  *                   return Objects.equals(_1, t._1) && Objects.equals(_2, t._2)
  *                   }
  *                   override def hashCode: Int =  { return Objects.hash(_1, _2)
  *                   }
  *                   override def toString: String =  { return "(" + _1 + ", " + _2 + ")"
  *                   }
  *                   }
  *
  */
object DataFrameDemo {

  // Windows-style scratch directory used for spark.local.dir (shuffle/spill files)
  val localPath = "D:/spark/temp/"
  // Windows-style directory used as spark.sql.warehouse.dir (Hive warehouse location)
  val hivePath = "D:/spark/hive/"

  /**
    * Demonstrates common DataFrame transformations: set operations
    * (intersect / except / union), column renaming, adding columns, and
    * exploding a delimited string column. Other demos are kept as
    * commented-out calls so they can be toggled on individually.
    *
    * select() picks columns (vertical slicing); row selection (horizontal)
    * is done with where/filter:
    * def where(condition: Column): Dataset[T] = filter(condition)
    * def where(conditionExpr: String): Dataset[T] = {
    * filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
    * }
    *
    * @param df    people DataFrame produced by generateDF
    * @param spark active SparkSession, used to build the demo frames
    */
  def transformationWithDF(df: DataFrame, spark: SparkSession): Unit = {
    //    groupByToApplyFunc(df, spark)
    // row filtering - result is a Dataset
    //    handleRow(df)

    // column selection / slicing - still a DataFrame
    //    handleColumn(df, spark)
    // sorting
    //    sortMethod(df, spark)

    //    cubeAgroupArollup(df, spark)

    // aggregation
    //    groupByDemo(df, spark)

    //    getColumn(df,spark)
    /* def aggregateByKey[U](zeroValue: U, numPartitions: scala.Int)(seqOp: scala.Function2[U, V, U], combOp: scala.Function2[U, U, U])(implicit evidence$2: scala.reflect.ClassTag[U]): org.apache.spark.rdd.RDD[scala.Tuple2[K, U]] = {
       /* compiled code */
     }*/

    //    df.stat.bloomFilter()

    // df1/df2 share the schema (xb, y, z); df3 keeps default column names _1/_2/_3
    val df1 = spark.createDataFrame(Seq(("a", "foo", 1), ("a", "foo", 3), ("a", "bar", 2), ("a", "car", 4), ("b", "foo", 3), ("b", "car", 8), ("b", "bar", 5), ("b", "bar", 1))).toDF("xb", "y", "z")
    df1.show()
    val df2 = spark.createDataFrame(Seq(("a", "foo", 1), ("a", "foo", 400), ("a", "bar", 500))).toDF("xb", "y", "z")
    val df3 = spark.createDataFrame(Seq(("a", "foo", 1), ("a", "foo", 400), ("a", "bar", 500)))
    df3.show()
    // intersection
    df1.intersect(df2).show()
    // difference: rows in df1 that are not in df2
    df1.except(df2).show()
    // union (by position; does not deduplicate)
    df1.union(df2).show()
    //    pivotDemo(df1, spark)
    import spark.implicits._
    df3.withColumnRenamed("_1", "col1").show()
    df3.withColumn("newcolum", $"_1").show()

    // split the "-"-delimited scores string and explode into one row per score
    val df4 = spark.createDataFrame(Seq(("a", "65-66-67-68"), ("b", "35-68-37-98"), ("c", "5-60-77-28"))).toDF("stu", "scores")
    df4.select($"stu", explode(split($"scores", "-"))).show()
    //        explainGruopBy(df1, spark)
    /* .reduceGroups((t1, t2) => {
       (t1._1, t1._2 ::: t2._2)
     }).toDF("sequence", "list").show(false)*/
  }

  // Row model for the people data: (sequence, name, age, gender, index)
  case class People(sequence: String, name: String, age: Int, gender: String, index: Int)

  // Projection of People down to (sequence, age)
  case class Age(sequence: String, age: Int)

  /**
    * Compares the physical plans of an untyped groupBy/agg against the typed
    * groupByKey + reduceGroups / mapGroups equivalents.
    *
    * Bug fix: the (commented-out) caller builds df1 with columns (xb, y, z),
    * so grouping on $"x" and converting with as[Foo] (fields x, y, z) would
    * both fail with an AnalysisException at runtime. The "xb" column is
    * renamed to "x" up front so every path below resolves.
    *
    * @param df1   DataFrame with columns (xb, y, z)
    * @param spark active SparkSession (for implicits/encoders)
    */
  private def explainGruopBy(df1: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    // align the column names with Foo's field names
    val renamed = df1.withColumnRenamed("xb", "x")
    // untyped aggregation: fully resolved and optimized by Catalyst
    renamed.groupBy($"x").agg(variance($"z")).explain
    val ds = renamed.as[Foo]
    // typed reduceGroups: the reduce lambda is opaque to the optimizer
    ds.groupByKey(_.x).reduceGroups((x: Foo, y: Foo) => x.copy(z = x.z + y.z)).explain
    ds.groupByKey(_.x).reduceGroups((x, y) => x.copy(z = x.z + y.z)).show()
    // mapGroups: each group is materialized as an iterator
    ds.groupByKey(_.x).mapGroups((_, iter) => iter.reduce((x, y) => x.copy(z = x.z + y.z))).explain
  }

  /**
    * Pivot examples: the distinct values of column "y" become columns and
    * "z" is aggregated with max. Supplying the pivot values explicitly
    * skips the extra job Spark otherwise runs to collect them.
    *
    * @param df1   DataFrame with columns (xb, y, z)
    * @param spark active SparkSession (for implicits)
    */
  private def pivotDemo(df1: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    // pivot over all distinct values of y
    df1.groupBy($"xb").pivot("y").max("z").show()
    // pivot restricted to an explicit value list
    df1.groupBy($"xb").pivot("y", List("foo", "car")).max("z").show()
    // same pivot, aggregate expressed as a (column -> functionName) pair
    df1.groupBy($"xb").pivot("y", List("foo", "car")).agg("z" -> "max").show()
    // plain two-column grouping for comparison, no pivot
    df1.groupBy($"xb", $"y").agg("z" -> "max").show()
  }

  // Typed record (x, y, z) used with as[Foo] in the typed Dataset examples
  case class Foo(x: String, y: String, z: Int)

  /**
    * Demonstrates ways to reference columns and how to build/consume struct
    * columns (keeping the newest event per user via max over a struct).
    *
    * Fixes: removed the duplicate `import spark.implicits._`, and replaced
    * the three bare column expressions (df.col / df.apply / $"...") with a
    * comment — as statements their results were silently discarded.
    *
    * @param df    people DataFrame with columns (sequence, name, index, gender, age)
    * @param spark active SparkSession (for implicits)
    */
  private def getColumn(df: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    // Three equivalent ways to obtain a Column:
    //   df.col("sequence")  |  df.apply("sequence")  |  $"sequence"
    val df1 = df.select($"sequence" % 2 === 0, $"age" + 1, $"gender".startsWith("M"))
    df.filter($"sequence" % 2 === 0 and $"gender".startsWith("M")).show()
    df1.show()
    val data = Seq(
      ("michael", 1, "event 1"),
      ("michael", 2, "event 2"),
      ("reynold", 1, "event 3"),
      ("reynold", 3, "event 4")).toDF("user", "time", "event")

    // pack (time, event) into a single struct column
    val data1 = data.withColumn("newColumn", struct('time, 'event))
    data1.printSchema()
    data1.show()
    // max over a struct orders by its first field (time), so this keeps the
    // latest event per user
    data
      .groupBy('user)
      .agg(max(struct('time, 'event)) as 'event1)
      .show()

    val newestEventPerUser =
      data
        .groupBy('user)
        .agg(max(struct('time, 'event)) as 'event)
        .select($"user", $"event.*")
    newestEventPerUser.show()
  }

  /**
    * Aggregations with groupBy / cube / rollup, including a custom variance
    * UDAF (MyVariance, declared elsewhere in the project) registered under
    * the name "myVariance" and invoked through callUDF.
    *
    * @param df    people DataFrame with at least the sequence and age columns
    * @param spark active SparkSession used to register the UDAF
    */
  private def groupByDemo(df: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    // NOTE(review): meaning of the constructor arg 3 is defined by the
    // project class MyVariance — confirm against its declaration
    val fd = new MyVariance(3)
    spark.udf.register("myVariance", fd)
    df.groupBy("sequence").agg(max("age"), avg("age") as "avg", variance("age") as "variance").show()
    // cube adds aggregate rows for every combination of the grouping columns
    df.cube("sequence").agg(stddev("age") as "sd").show()
    df.rollup("sequence").agg(countDistinct("age") as "dis_count", callUDF("myVariance", $"age") as "myvar").show()
  }

  /**
    * The SQL flavour of plain GROUP BY vs ROLLUP vs CUBE over
    * (sequence, gender), run against the "people" temp view.
    *
    * @param df    people DataFrame, registered as temp view "people"
    * @param spark active SparkSession used to execute the SQL
    */
  private def cubeAgroupArollup(df: DataFrame, spark: SparkSession) = {
    df.createOrReplaceTempView("people")
    val queries = Seq(
      "select sequence,gender,count(*) as group from people group by sequence,gender",
      "select sequence,gender,count(*) as rollup from people group by rollup(sequence,gender)",
      "select sequence,gender,count(*) as cube from people group by cube(sequence,gender)"
    )
    queries.foreach(query => spark.sql(query).show())
  }

  /**
    * Row ordering examples. orderBy is an alias that delegates to sort,
    * so both forms below behave identically.
    *
    * @param df    people DataFrame
    * @param spark active SparkSession (for implicits)
    */
  private def sortMethod(df: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    // ascending sort on three columns, referenced by name
    df.sort("age", "gender", "sequence").show()
    // mixed direction: age ascending (default), sequence descending
    df.sort($"age", $"sequence".desc).show()
  }

  /**
    * Column projection with a UDF: doubles "age" both through the registered
    * SQL name (selectExpr) and through the Column-returning UDF value
    * (select), then drops the "index" column.
    *
    * @param df    people DataFrame with columns (sequence, name, index, gender, age)
    * @param spark active SparkSession used to register the UDF
    */
  private def handleColumn(df: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    // UDF doubling an Int; registration makes it callable from SQL expressions
    val muti2 = udf((num: Int) => num * 2)
    spark.udf.register("mutiBy2", muti2)
    df.select($"name", $"age" + 1).show()
    df.selectExpr("sequence", "index as order", "mutiBy2(age) as muti2").show()
    df.select($"sequence", $"index" as "order", muti2($"age") as "muti2").show()
    df.drop("index").show()
  }

  /**
    * Per-group aggregates (variance/max/min/count of age, grouped by
    * sequence) computed through SQL on a temp view rather than the
    * DataFrame API.
    *
    * @param df    people DataFrame, registered as temp view "people"
    * @param spark active SparkSession used to execute the SQL
    */
  private def groupByToApplyFunc(df: DataFrame, spark: SparkSession) = {
    df.createOrReplaceTempView("people")
    spark
      .sql("select sequence,VARIANCE(age) as var,max(age) as max,min(age) as min,count(*) as count from people group by sequence")
      .show()
  }

  /**
    * Row filtering with SQL condition strings and Column predicates, plus
    * positional renaming via toDF and limit().
    *
    * Fix: `$"sequence" !== 6411` used the `!==` operator, which has been
    * deprecated since Spark 2.0 in favour of `=!=` (its operator precedence
    * differs surprisingly from `===`); replaced with the behaviorally
    * identical `=!=`.
    *
    * @param df    people DataFrame with a "sequence" column
    * @param spark active SparkSession (for implicits)
    */
  private def handleRow(df: DataFrame, spark: SparkSession) = {
    import spark.implicits._
    val ds = df.where("sequence!=6411")
    // toDF relabels all five columns positionally
    val df1 = ds.toDF("col1", "col2", "col3", "col4", "col5")
    df1.printSchema()
    ds.printSchema()
    df1.show()
    // Spark SQL accepts both = and == for equality
    df.where("sequence==6410 or sequence=6411").show()
    df.where($"sequence" === 6411 or $"sequence" === 6410).show()
    df.filter($"sequence" =!= 6411).show()
    df.limit(4).show()
  }

  /**
    * Entry point: builds a local SparkSession, loads the demo people data,
    * and runs the DataFrame transformation demos.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {
    val session = SparkSession.builder()
      .master("local[5]")
      .appName("rdd")
      .config("spark.local.dir", localPath)
      .config("spark.sql.warehouse.dir", hivePath)
      .getOrCreate()
    // note: the DataFrame is built (and shown) before the log level is lowered
    val people = generateDF(session)
    session.sparkContext.setLogLevel("ERROR")
    transformationWithDF(people, session)
  }

  /**
    * Builds the people DataFrame by parsing a CSV file through the RDD API
    * and applying an explicit schema.
    *
    * Expected line layout: sequence,name,index,gender,age — fields 2 (index)
    * and 4 (age) must parse as Int or the job fails at runtime.
    *
    * The commented block below keeps alternative loading recipes (json,
    * csv with/without header, hive table, parquet) for reference.
    *
    * @param spark active SparkSession
    * @return DataFrame with columns (sequence, name, index, gender, age)
    */
  private def generateDF(spark: SparkSession) = {
    //generate rdd from json file( directly get dataType,you can use row.getLong)
    /* val df1 = spark.read.json("src/main/resources/data/people.json")
     df1.printSchema()
     df1.map(row => (row.getAs[String]("name"), row.getLong(0), row.getString(1), row.getLong(2), row.getString(3)))
       .foreach(row => println(row._1 + "|" + row._2 + "|" + row._3 + "|" + row._4 + "|" + row._5))


     //generate rdd from csv file with header which will generate  all StringType fields
     val df2 = spark.read.option("header", "true").csv("src/main/resources/data/people.csv")
     df2.printSchema()
     df2.map(row =>
       row.getAs[String](0) + "|" + row.getAs[String](1).toInt + "|" +
         row.getAs[String]("gender") + "|" + row.getAs[String](3).toInt)
       .foreach(str => println(str))


     //generate rdd from csv file without header which will control the field type
     val schema = StructType(Seq(
       StructField("name", StringType, true),
       StructField("age", IntegerType, true),
       StructField("gender", StringType, true),
       StructField("index", IntegerType, true)
     ))
     val df3 = spark.read.schema(schema).csv("src/main/resources/data/people1.csv")
     df3.printSchema()
     df3.map(row => (row.getString(0), row.getInt(1), row.getInt(3)))
       .foreach(tup => println(tup))

     //read data from hive table
     df3.write.saveAsTable("people")
     val df4 = spark.read.table("people")
     df4.printSchema()
     df4.write.parquet("src/main/resources/data/people")

     //read data from hdfs
     val df5 = spark.read.load("src/main/resources/data/people")

     //read data from parquet,best form hdfs
     val df6 = spark.read.parquet("src/main/resources/data/people")
     df6.printSchema()
     df6.map(row => (row.getInt(1), row.getString(0)))
       .foreach(tuple => println(tuple))*/

    val lines = spark.sparkContext.textFile("src/main/resources/data/people2.csv")
    // split each CSV line and coerce index/age to Int
    val rows = lines
      .map(_.split(","))
      .map(fields => Row(fields(0), fields(1), fields(2).toInt, fields(3), fields(4).toInt))
    val peopleSchema = StructType(Seq(
      StructField("sequence", StringType, true),
      StructField("name", StringType, true),
      StructField("index", IntegerType, true),
      StructField("gender", StringType, true),
      StructField("age", IntegerType, true)
    ))
    val peopleDf = spark.createDataFrame(rows, peopleSchema)
    peopleDf.printSchema()
    peopleDf.show()
    peopleDf
  }
}
