package org.shj.spark.sql

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.types.StructField

/**
 * A student's score record, used to build a DataFrame via `toDF()` on an
 * RDD of case-class instances.
 *
 * @param xuehao the student id ("xuehao" = 学号, student number)
 * @param score  the numeric score for that student
 */
final case class Score(xuehao: String, score: Long)

/**
 * Demonstrates the two common ways of turning an RDD into a DataFrame —
 * (1) an RDD of `Row`s plus an explicit `StructType` schema, and
 * (2) an RDD of case-class instances via `toDF()` — and then joins the
 * two resulting DataFrames on their id columns.
 *
 * NOTE(review): "Dateset" in the object name looks like a typo for
 * "Dataset", but renaming would change the entry-point class, so it is
 * kept as-is.
 */
object DatesetTransformationDemo {
  def main(args: Array[String]): Unit = {
    val ss = SparkSession.builder().appName("DatesetTransformationDemo").master("local").getOrCreate()
    import ss.implicits._
    val sc = ss.sparkContext
    sc.setLogLevel("WARN")

    // Students as (student id, name) pairs.
    val students = Array(("s1", "zhuyin"), ("s2", "xuruyun"), ("s3", "bdyjy"), ("s4", "yangmi"))
    val studentRdd = sc.parallelize(students)

    // Route 1: RDD -> DataFrame by mapping to Rows and attaching an explicit schema.
    val studentRows = studentRdd.map { case (id, name) => Row(id, name) }
    val studentSchema = StructType(Array(StructField("sxuehao", StringType), StructField("name", StringType)))
    val studentDf = ss.createDataFrame(studentRows, studentSchema)

    // Route 2: RDD -> DataFrame by mapping to a case class and calling toDF()
    // (column names are derived from the Score fields via ss.implicits).
    val scores = Array(("s6", 80), ("s2", 90), ("s5", 98), ("s1", 70), ("s3", 60))
    val scoreRdd = sc.parallelize(scores, 2)
    val scoreDf = scoreRdd.map { case (id, points) => Score(id, points) }.toDF()

    // Unlike an RDD join (which keys implicitly on the pair's first element),
    // a DataFrame join needs an explicit join expression. The two id columns
    // are deliberately named differently ("sxuehao" vs "xuehao") so the
    // column references in the condition are unambiguous.
    studentDf.join(scoreDf, $"sxuehao" === $"xuehao").show()

    ss.stop()
  }
}