package com.learn.lb.spark.sql

import org.apache.spark.sql.{Column, SparkSession}

import scala.collection.mutable.ListBuffer
import scala.util.Random

/**
 * RDD-to-DataFrame demo.
 *
 * Generates random "id,name,age" CSV-style rows, parallelizes them into an
 * RDD, maps each row to the [[TestData]] case class, and converts to a
 * DataFrame via Spark's reflection-based implicit conversion (`toDF`).
 * Then runs a few basic Column-expression queries for illustration.
 *
 * @author laibo
 * @since 2019/9/4 14:20
 */
object Rdd2DataFrameDemo {

  def main(args: Array[String]): Unit = {
    val sparkSession = SparkSession.builder()
      .master("local").appName("Rdd2DataFrameDemo").getOrCreate()
    val random = new Random()
    // Build 10001 rows of test data immutably (no var / ListBuffer loop).
    // Use the single Random instance consistently instead of mixing in Math.random().
    val testData = List.tabulate(10001) { _ =>
      val id = random.nextInt(1000000).toString
      val name = "张三" + random.nextInt(999999999)
      val age = random.nextInt(50).toString
      s"$id,$name,$age"
    }
    // Import implicits so toDF() becomes available on the RDD.
    import sparkSession.implicits._
    val testDF = sparkSession.sparkContext.parallelize(testData)
      .map(_.split(","))
      .map(v => TestData(v(0), v(1), v(2).toInt))
      .toDF()
    testDF.printSchema()
    // Equivalent to: SELECT name FROM table
    testDF.select(new Column("name")).show()
    // Equivalent to: SELECT age + 10 AS newAge FROM table
    testDF.select(new Column("age") + 10 as "newAge").show()
    // Equivalent to: SELECT * FROM table WHERE age > 30
    testDF.filter(new Column("age") > 30).show()
    // Equivalent to: SELECT name, count(1) FROM table GROUP BY name
    testDF.groupBy(new Column("name")).count().show()
    sparkSession.stop()
  }

  /** Row schema used for the reflection-based DataFrame conversion. */
  case class TestData(id: String, name: String, age: Int)

}
