package com.kgc.bigdata.spark.sql

import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Spark SQL examples that read from external data sources:
  * a Hive table, a MySQL table over JDBC, and a Parquet file.
  *
  * Runs locally with two worker threads (`local[2]`).
  */
object DataSourceApp {

  // Explicit `: Unit =` — procedure syntax (`def main(...) { ... }`) is deprecated.
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("DataSourceApp")

    val sc = new SparkContext(sparkConf)
    // SQLContext is already imported at the top of the file; no need for the
    // fully-qualified `org.apache.spark.sql.SQLContext` here.
    val sqlContext = new SQLContext(sc)
    val hiveContext = new HiveContext(sc)
    //parquetFile(sqlContext)

    jdbcTable(sqlContext).show()

    hiveTable(hiveContext).show()
    // `stop()` has a side effect, so it is called with parentheses.
    sc.stop()
  }

  /**
    * Reads the Hive table `emp`.
    *
    * @param hiveContext Hive-enabled SQL context to read through.
    * @return the table contents as a DataFrame.
    */
  def hiveTable(hiveContext: HiveContext): DataFrame =
    hiveContext.table("emp")

  /**
    * Reads the MySQL table `TBLS` (Hive metastore) over JDBC.
    *
    * SECURITY NOTE(review): the username and password are hard-coded in the
    * JDBC URL — move them to configuration or environment variables before
    * using this outside a local demo.
    *
    * @param sqlContext SQL context to read through.
    * @return the table contents as a DataFrame.
    */
  def jdbcTable(sqlContext: SQLContext): DataFrame =
    sqlContext.read
      .format("jdbc")
      .options(Map(
        "url" -> "jdbc:mysql://hadoop000:3306/hive?user=root&password=root",
        "dbtable" -> "TBLS",
        "driver" -> "com.mysql.jdbc.Driver"))
      .load()

  /**
    * Reads a Parquet file from a hard-coded local Windows path and prints
    * its contents to stdout.
    *
    * @param sqlContext SQL context to read through.
    */
  def parquetFile(sqlContext: SQLContext): Unit = {
    val df = sqlContext.read.parquet("H:/workspace/SparkProject/src/data/users.parquet")
    df.show()
  }
}
