package sparksql

import org.apache.spark.sql.{DataFrame, SparkSession}

object TestSparkSqlHive {

  /**
   * Post-processes the raw Hive query result before it is used as training data.
   *
   * Currently this only prints the schema and returns the input unchanged;
   * the commented-out lines document the intended inspection/filter steps.
   *
   * NOTE(review): the name contains a typo ("tarin" -> "train"); it is kept
   * as-is so external callers are not broken.
   *
   * @param data the DataFrame produced by the Hive query
   * @return the (currently unmodified) input DataFrame
   */
  def build_tarin_data(data: DataFrame): DataFrame = {
    // ---------------- DataFrame inspection ----------------

    // Preview rows:
    // data.show()

    // Print the schema so column names/types can be verified in the driver log.
    data.printSchema()

    // Select a single column:
    // data.select("label_y")

    // Filter out-of-range values (requires `import spark.implicits._` for $):
    // data.filter($"label_y".gt(100)).show()
    data
  }

  /**
   * Entry point: creates a Hive-backed table, loads a local file into it,
   * runs a test query, and writes the result to HDFS.
   */
  def main(args: Array[String]): Unit = {
    val file_path = "hdfs:///test/data_hive.txt"
    val out_path = "hdfs:///test/train_data.txt"

    // Local-filesystem paths for desktop debugging:
    // val file_path = "D:\\git\\TestSparkScala\\data\\data_hive.txt"
    // val out_path = "D:\\git\\TestSparkScala\\data\\train_data_hive.txt"

    // Build a Hive-enabled session. `master("local")` is hard-coded for
    // single-JVM runs; drop it (or override via spark-submit) on a cluster.
    val hiveContext = SparkSession
      .builder()
      .appName("TestSparkHive")
      .master("local")
      .enableHiveSupport()
      .getOrCreate()

    // Ensure the session is always stopped, even if a stage fails.
    try {
      // (Re)create the target table.
      hiveContext.sql("DROP TABLE IF EXISTS test_data")
      hiveContext.sql(s"""
                         | CREATE TABLE IF NOT EXISTS test_data(
                         | label_y DOUBLE,
                         | feature_x1 DOUBLE,
                         | feature_x2 DOUBLE,
                         | feature_x3 DOUBLE,
                         | feature_x4 DOUBLE,
                         | feature_x5 DOUBLE)
                      """.stripMargin)

      // Load the source file into the table.
      hiveContext.sql(s"LOAD DATA LOCAL INPATH '$file_path' INTO TABLE test_data")

      // Test query against the freshly loaded table.
      val selectData = hiveContext.sql("SELECT label_y FROM test_data where label_y > 10")

      // Post-process the query result.
      val df_data = build_tarin_data(selectData)

      // Preview the processed data.
      println("=======================查看数据==========================")
      df_data.show(10)

      // Persist the processed data as a new Hive table:
      // df_data.write.saveAsTable("train_data")

      // Save the query result to HDFS.
      // BUG FIX: "txt" is not a registered Spark data source and fails at
      // runtime with ClassNotFoundException. Use the built-in "csv" source.
      // ("text" would also fail here: label_y is DOUBLE, and the text source
      // only writes a single string column.)
      selectData.write.format("csv").save(out_path)

      // LinearRegression (future work):
      // val model_out_path = "hdfs:///test/model"
      // lr(out_path, model_out_path)
    } finally {
      hiveContext.stop()
    }
  }

}
