package main.scala.com.hive.spark.test

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.HiveContext
import org.apache.hadoop.hive.conf.HiveConf
import java.net.URL
import org.apache.hadoop.conf.Configurable
import org.apache.hadoop.conf.Configuration
import org.apache.spark.SparkConf
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.GroupedData
import org.apache.spark.sql.GroupedDataset

/**
 * @author ZhiLi
 */
object ContextForHive {

  /**
   * Profiles a 100-row sample of the Hive table `rx5_tbox_parquet_all`
   * (partition pt=20170316): for every column it prints the column's name,
   * its data type, and the count of each distinct value in the sample.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("hiveTest")
    val sc = new SparkContext(sparkConf)
    val hiveContext = new HiveContext(sc)

    hiveContext.sql("use default")
    // Cache the sample: every column inspection below re-reads this DataFrame.
    val tableDF = hiveContext.sql("select * from rx5_tbox_parquet_all where pt=20170316 limit 100")
    tableDF.cache()

    tableDF.schema.foreach { field =>
      println(s"Column name:${field.name}")
      println(s"Column type:${field.dataType}")

      // Per-value counts for this column. collect() is required so the rows
      // print on the driver — foreach directly on the RDD runs on executors,
      // where stdout is not visible here. Bounded by the `limit 100` above,
      // so at most 100 distinct values come back.
      val valueCounts = tableDF.select(field.name).groupBy(field.name).count().rdd
      valueCounts.collect().foreach(row => println(row))
    }

    sc.stop()
  }
}