package com.sql

import jdk.nashorn.internal.runtime.regexp.joni.encoding.CharacterType
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Simple person record parsed from a "username,age,gender" text row.
 *
 * The constructor parameters are mirrored into the secondary fields
 * `name` / `pAge` / `pGender`; both sets are kept mutable and public
 * for backward compatibility with existing callers.
 */
case class Person(var username: String, var age: Int, var gender: Char) {
  var name: String = username
  var pAge: Int = age
  var pGender: Char = gender

  /** Same rendering as the original concatenation, via string interpolation. */
  override def toString: String = s" pName =$name pAge =$pAge pGender =$pGender"
}

object ReadTxt {

  /**
   * Reads comma-separated person records ("username,age,gender") from a local
   * text file, builds a DataFrame with an explicit schema, registers it as a
   * temporary table, and runs a sample SQL query whose matching rows are
   * printed to stdout.
   *
   * NOTE(review): `fields(1).toInt` throws NumberFormatException on malformed
   * rows and `fields(i)` throws on short rows — input file is assumed clean.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("txt-test")
    val sc = new SparkContext(conf)
    val sparkSQL = new SQLContext(sc)

    val rdd: RDD[String] = sc.textFile("./sparksql/SQL_FOR_PERSON")

    // Split each line and build the Row in a single pass (the original went
    // through an intermediate Tuple3 and a second map over the RDD).
    val result = rdd.map { line =>
      val fields = line.split(",")
      Row(fields(0), fields(1).toInt, fields(2))
    }

    // Explicit schema matching the Row layout above:
    // (username: String, age: Int, gender: String — gender is kept as the raw
    // substring, hence StringType rather than a char-like type).
    val schema = StructType(
      List(
        StructField("username", StringType, false),
        StructField("age", IntegerType, false),
        StructField("gender", StringType, false)
      )
    )

    val frame: DataFrame = sparkSQL.createDataFrame(result, schema)
    // registerTempTable is deprecated in Spark 2.x (use createOrReplaceTempView
    // there); kept as-is for compatibility with the SQLContext-era API this
    // file targets.
    frame.registerTempTable("person")
    frame.printSchema()

    val data: DataFrame = sparkSQL.sql("select * from person where username='zhangsan'")
    data.rdd.foreach(println)

    sc.stop()
  }

}
