package cn.lecosa.spark.hive

import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.sql.SQLContext

/** Immutable record describing one person; used as the row type for the `info` view.
  *
  * Note: case classes already extend `Serializable`, so the original explicit
  * `extends java.io.Serializable` was redundant and has been removed — the class
  * is still serializable for Spark shipping.
  */
final case class Info(name: String, age: Int, gender: String, addr: String)

object Demo1 {

  /** Number of random sample records to generate. */
  private val RecordCount = 100

  /** Entry point: generates random `Info` rows, exposes them as temp view `info`,
    * and materializes them into Hive table `lecosa.student`.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[2]").setAppName("KyroTest")
    val sc = new SparkContext(conf)
    val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)

    val nameArr = Array("lsw", "yyy", "lss")
    val genderArr = Array("male", "female")
    val addressArr = Array("beijing", "shanghai", "shengzhen", "wenzhou", "hangzhou")

    // Build the sample data immutably; index by `.length` instead of hard-coded
    // bounds so the pools can grow without touching this loop.
    val records = (1 to RecordCount).map { _ =>
      Info(
        name   = nameArr(Random.nextInt(nameArr.length)),
        age    = Random.nextInt(100),
        gender = genderArr(Random.nextInt(genderArr.length)),
        addr   = addressArr(Random.nextInt(addressArr.length))
      )
    }

    val rdd = sc.parallelize(records)

    // BUG FIX: the temp view must be registered through the SAME context that
    // later queries it. The original code built a separate SQLContext for the
    // view while issuing the CTAS through hiveContext, so `info` was not
    // visible to the Hive query. Register via hiveContext instead.
    import hiveContext.implicits._
    rdd.toDF().createTempView("info")

    hiveContext.sql("use lecosa")
    hiveContext.sql("create table student as select * from info")

    sc.stop()
  }
}

