package sparksql

import model.LinearRegression_online.lrOnline
import org.apache.spark.sql.{DataFrame, SparkSession}

object TestSparkSqlFile {

  /**
   * Placeholder for DataFrame preprocessing before training.
   *
   * Currently a pass-through: all transformation steps are left commented
   * out as examples. NOTE(review): the name keeps the original typo
   * ("tarin" for "train") to avoid breaking external callers.
   *
   * @param data raw input DataFrame
   * @return the DataFrame, unmodified
   */
  def build_tarin_data(data:DataFrame): DataFrame = {
    //---------------- DataFrame inspection examples ----------------

    // Show the data
    //data.show()

    // Show the schema
    //data.printSchema()

    // Select a single column
    //data.select("label_y")

    // Filter out null / abnormal values
    //data.filter($"label_y".gt(100)).shows
    data
  }

  /**
   * Entry point: reads raw data, registers it as a temp view, selects the
   * training columns via Spark SQL, persists the result, and hands it to
   * the linear-regression trainer.
   */
  def main(args: Array[String]): Unit = {

    // HDFS data paths (alternative deployment, kept for reference)
//    val file_path = "hdfs:///test/data_hive.txt"
//    val out_path = "hdfs:///test/train_data.txt"

    // Local paths (Windows-style separators)
    val file_path = "\\data\\data1.txt"
    val out_path = "\\data\\train_data_sql.txt"

    // Create the Spark session
    val spark = SparkSession
      .builder()
      .master("local")
      .appName("TestSparkSql")
      .getOrCreate()

    // Read the raw data.
    // NOTE(review): spark.read.text yields a single string column named
    // "value"; the SQL below assumes named feature columns, so either the
    // input is expected to be pre-structured or this should be a csv read
    // (e.g. spark.read.option("header", "true").csv(file_path)) — TODO
    // confirm against the actual data file.
    val df = spark.read.text(file_path)
    //df.map(line => Row(line.split(",")))

    // Preprocess the data
    val df_data = build_tarin_data(df)
    df_data.show(10)

    // Register a temporary view for SQL access
    df_data.createOrReplaceTempView("train_data")

    // BUGFIX: removed the trailing comma after the last selected column
    // (was "feature_x5," directly before "from"), which made the query
    // fail to parse at runtime. Also `val` instead of `var`: the query
    // string is never reassigned.
    val sql =
      s"""
         | select
         | label_y,
         | feature_x1,
         | feature_x2,
         | feature_x3,
         | feature_x4,
         | feature_x5
         | from train_data
      """.stripMargin

    // Execute the SQL query
    val data: DataFrame = spark.sql(sql)

    // Inspect the result
    println("=======================查看数据==========================")
    data.show(10)

    // Persist the training set.
    // BUGFIX: "txt" is not a registered Spark data source (would throw
    // ClassNotFoundException); the built-in "text" sink also accepts only
    // a single string column, so "csv" is used for the multi-column result.
    data.write.format("csv").save(out_path)

    // Train the linear-regression model on the saved data
    val save_model_path = "//model"
    lrOnline(out_path,save_model_path)

    spark.stop()

  }

}
