package com.shujia.sql

import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SQLContext, SaveMode}

object Demo3 {

  /**
    * Reads student records from HDFS, counts students per gender with
    * Spark SQL, and writes the result back to HDFS as parquet.
    *
    * Expected input format (CSV, one student per line):
    *   id,name,age,gender,clazz
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      //.setMaster("local") // uncomment for local testing
      .setAppName("Demo1sql")

    val sc = new SparkContext(conf)

    // Spark SQL entry point (Spark 1.x API).
    // NOTE(review): the original also built an unused HiveContext, which
    // eagerly connects to the Hive metastore — dropped here.
    val sqlContext = new SQLContext(sc)

    try {
      // Read raw text lines from HDFS
      val studentRDD = sc.textFile("/data/student/")

      import sqlContext.implicits._

      // Parse each CSV line into a typed tuple and lift to a DataFrame.
      // NOTE(review): assumes every line has exactly 5 comma-separated
      // fields and a numeric age — malformed lines will throw at runtime.
      val studentDF = studentRDD.map(line => {
        val split = line.split(",")
        (split(0), split(1), split(2).toInt, split(3), split(4))
      }).toDF("id", "name", "age", "gender", "clazz") // column names

      // Register as a temp table so it can be queried with SQL
      studentDF.registerTempTable("student")

      // Alias the aggregate: without it the column is named "count(1)",
      // and parentheses are not legal in a parquet column name, which
      // would break the write below.
      val genderCount = sqlContext.sql(
        "select gender, count(1) as cnt from student group by gender")

      // Persist the result to HDFS, replacing any previous run's output
      genderCount
        .write
        .mode(SaveMode.Overwrite)
        .parquet("/data/gendercount")
    } finally {
      // Release cluster resources even if the job fails
      sc.stop()
    }
  }
}
