package scalapackage.testspark

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Created by Germmy on 2018/5/29.
  */
/**
  * Example job: builds a DataFrame from a CSV-like text file using an explicit
  * [[StructType]] schema, queries it via Spark SQL, and writes the result as JSON.
  *
  * Usage: StructureTypeTest &lt;inputPath&gt; &lt;outputPath&gt;
  *  - inputPath:  text file with comma-separated rows: id,name,age,faceVal
  *  - outputPath: HDFS (or local) directory for the JSON output
  */
object StructureTypeTest {

  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException
    // when the caller forgets a path argument.
    require(args.length >= 2, "Usage: StructureTypeTest <inputPath> <outputPath>")

    val sparkConf: SparkConf = new SparkConf().setAppName("TestSparkSql").setMaster("local[*]")
    val sc: SparkContext = new SparkContext(sparkConf)

    try {
      // 1. Create the SQLContext (Spark 1.x entry point for Spark SQL).
      val sqlContext: SQLContext = new SQLContext(sc)

      // 2.1 Read the input file and split each line into its fields.
      //     NOTE(review): assumes every line has at least 4 comma-separated
      //     fields and that columns 0, 2 and 3 parse as Int — malformed rows
      //     will fail the job at action time.
      val lines: RDD[Array[String]] = sc.textFile(args(0)).map(_.split(","))

      // 2.2 Explicit schema for the DataFrame; all fields are nullable.
      val schema: StructType = StructType(
        List(
          StructField("id", IntegerType, nullable = true),
          StructField("name", StringType, nullable = true),
          StructField("age", IntegerType, nullable = true),
          StructField("faceVal", IntegerType, nullable = true)
        )
      )

      // 2.3 Convert the RDD[Array[String]] into an RDD[Row] matching the schema,
      //     then pair it with the schema to obtain a DataFrame.
      val personRdd: RDD[Row] = lines.map(x => Row(x(0).toInt, x(1), x(2).toInt, x(3).toInt))
      val personDf: DataFrame = sqlContext.createDataFrame(personRdd, schema)

      // 2.4 Register a temporary table so it can be referenced from SQL.
      //     (Spark 2.x renames this to createOrReplaceTempView.)
      personDf.registerTempTable("myTable")

      // 2.5 Query: the two oldest people.
      val top2ByAge: DataFrame = sqlContext.sql("select * from myTable order by age desc limit 2")

      // 2.6 Write the result out as JSON.
      top2ByAge.write.json(args(1))
    } finally {
      // Always release Spark resources, even if the job fails.
      sc.stop()
    }
  }

}
