import org.apache.spark.SparkConf
import org.apache.spark.SparkContext._
import org.apache.spark.sql.types.{StructType, StructField, StringType, IntegerType};
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, Row, SaveMode, _}
import org.apache.spark.sql.SQLContext
//NOTE: before running this code you must
//  - start HDFS (start-dfs.sh)
//  - start the Hive metastore (hive --service metastore)



object RDD2Dataframe {

  /**
   * Entry point: builds a local SparkSession with Hive support and runs the
   * programmatic-schema example.
   *
   * Prerequisites: HDFS and the Hive metastore must already be running
   * (see the note at the top of the file).
   *
   * @param args optional; args(0) overrides the input file path
   *             (defaults to "./employee.txt")
   */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder
      .appName("My Spark Application") // optional; autogenerated if not specified
      .master("local[*]")              // local mode — avoid hardcoding in production deployments
      .enableHiveSupport()
      // .config("spark.sql.warehouse.dir", "target/spark-warehouse")
      .getOrCreate()

    // Allow the input path to be supplied on the command line; keep the
    // original hard-coded path as the default for backward compatibility.
    val inputPath = if (args.nonEmpty) args(0) else "./employee.txt"

    try {
      runProgrammaticSchemaExample(spark, inputPath)
    } finally {
      // Release the session and its resources — the original never stopped it.
      spark.stop()
    }
  }

  /**
   * Demonstrates the "programmatic schema" pattern:
   *  1. build a StructType schema from a string of column names,
   *  2. load a text file into an RDD[Row],
   *  3. apply the schema via createDataFrame,
   *  4. register a temp view and query it with SQL.
   *
   * @param spark the active SparkSession
   * @param path  path to a comma-separated "name,age" text file
   */
  private def runProgrammaticSchemaExample(
      spark: SparkSession,
      path: String = "./employee.txt"): Unit = {
    // 2. Define the schema programmatically with StructType.
    //    All column names are packed into one space-separated string.
    val schemaString = "name age"
    // Split on spaces; every column is deliberately treated as a nullable
    // StringType in this example.
    val fields = schemaString.split(" ")
      .map(fieldName => StructField(fieldName, StringType, nullable = true))
    println("--------------------------------------------------------------------------")
    println("type of fields:" + fields.getClass.getSimpleName) // StructField[]
    println("--------------------------------------------------------------------------")
    val schema = StructType(fields) // assemble the table schema from the fields

    // 3. Load the text file and shape it into an RDD[Row] matching the schema.
    val rdd = spark.sparkContext.textFile(path)
    println("type of rdd:" + rdd.getClass.getSimpleName) // MapPartitionsRDD

    val rowRDD = rdd
      .filter(_.trim.nonEmpty)   // skip blank lines (would otherwise throw on indexing)
      .map(_.split(","))
      .filter(_.length >= 2)     // skip malformed lines that lack both fields
      // trim BOTH fields (the original trimmed only the age column)
      .map(cols => Row(cols(0).trim, cols(1).trim))

    println("type of rowRDD:" + rowRDD.getClass.getSimpleName) // MapPartitionsRDD
    println("type of schema:" + schema.getClass.getSimpleName) // StructType

    // Apply the schema to the Row RDD to obtain a DataFrame.
    val peopleDF = spark.createDataFrame(rowRDD, schema)
    peopleDF.show()

    // Register a temporary view and query it with SQL. Note: "age" is a
    // string column; Spark implicitly casts it for the numeric comparison.
    peopleDF.createOrReplaceTempView("employee")
    val sqlDF = spark.sql("SELECT * FROM employee where age > 16")
    sqlDF.show()
  }

}

//Code adapted from:
//https://www.2cto.com/net/201704/622166.html

