package com.bianmaba.hive.scala

import org.apache.spark.sql.{SaveMode, SparkSession}

object WriteDemo {

  /** Default HDFS location of the comma-separated input file ("id,name" per line). */
  private val DefaultInputPath = "hdfs://hadoop-master:9000/input/info.txt"

  /** Default Hive table the data is written to. */
  private val DefaultTableName = "info"

  /**
   * Reads comma-separated (id, name) records from HDFS and writes them into a
   * Hive table, then prints the table contents.
   *
   * @param args optional overrides: args(0) = input path, args(1) = table name.
   *             When omitted, the original hard-coded defaults are used, so
   *             existing invocations behave exactly as before.
   */
  def main(args: Array[String]): Unit = {
    // Run the Hadoop client as "root" so HDFS/Hive permissions line up in this demo setup.
    System.setProperty("HADOOP_USER_NAME", "root")

    val inputPath = if (args.length > 0) args(0) else DefaultInputPath
    val tableName = if (args.length > 1) args(1) else DefaultTableName

    val spark = SparkSession.builder
      .appName("write_data_to_hive")
      .master("local[*]")
      .config("spark.some.config.option", "spark")
      .enableHiveSupport
      .getOrCreate()

    try {
      import spark.implicits._

      val rdd = spark.sparkContext.textFile(inputPath)

      // Approach 1: reflection-based schema inference via the Info case class.
      // Lines with fewer than two fields (e.g. blank lines) are skipped instead
      // of failing with ArrayIndexOutOfBoundsException.
      val ds = rdd
        .map(_.split(","))
        .filter(_.length >= 2)
        .map(values => Info(values(0).trim, values(1).trim))
        .toDF()
      ds.printSchema()

      // Approach 2 (alternative): explicit schema via StructType.
      // val fields = "id,name".split(",")
      //   .map(fieldName => StructField(fieldName, StringType, nullable = true))
      // val schema = StructType(fields)
      // val infoRdd = rdd.map(_.split(",")).map(values => Row(values(0).trim, values(1).trim))
      // val ds = spark.createDataFrame(infoRdd, schema)

      // DDL returns an empty DataFrame, so there is nothing useful to show() here.
      spark.sql(s"create table if not exists $tableName(id string,name string)")
      spark.sql("show tables").show()

      // Either of the following two approaches inserts the data:
      ds.write.mode(SaveMode.Overwrite).saveAsTable(tableName)
      // ds.createOrReplaceTempView("info_tem")
      // spark.sql(s"insert into $tableName select id,name from info_tem")

      spark.sql(s"select * from $tableName").show()
    } finally {
      // Always release the SparkSession, even if reading or writing fails.
      spark.stop()
    }
  }

  /** Row type for reflective schema inference: two string columns (id, name). */
  case class Info(id: String, name: String)

}
