package com.hiscene.sparksql

object SparkSQLCSV {

  import java.io.File

  import org.apache.spark.sql.SparkSession

  /** Loads a CSV file with an explicit schema, then reports the total price
    * per name (descending), keeping only totals >= 20 and adding a derived
    * `psa` column (half the total).
    *
    * @param args optional; `args(0)` overrides the input CSV path
    *             (defaults to the original hard-coded location).
    */
  def main(args: Array[String]): Unit = {

    import org.apache.spark.sql.DataFrame

    // Input path is now parameterized; the original hard-coded file remains
    // the default so existing invocations behave identically.
    val inputPath = if (args.nonEmpty) args(0) else "file:///D:/b.csv"

    val warehouseLocation = new File("spark-warehouse").getAbsolutePath
    val spark: SparkSession = SparkSession
      .builder()
      .master("local[*]")
      .appName("Spark Hive Example")
      // warehouseLocation was computed but never used before; wire it in so
      // the warehouse dir actually points at ./spark-warehouse.
      .config("spark.sql.warehouse.dir", warehouseLocation)
      .getOrCreate()
    spark.sparkContext.setLogLevel("ERROR")

    import spark.implicits._

    try {
      val df: DataFrame = spark.read.format("csv")
        .option("sep", ",")
        .option("header", "true")
        .option("encoding", "gbk")
        // An explicit schema avoids the extra pass over the data that
        // inferSchema would trigger.
        .schema("id INT, name STRING, price DOUBLE")
        .load(inputPath)

      df.printSchema()

      // Registered so the equivalent SQL can be run interchangeably:
      //   spark.sql("select name, sum(price) as total from person group by name order by total desc")
      df.createOrReplaceTempView("person")

      import org.apache.spark.sql.functions._
      df.groupBy("name")
        .agg(sum("price").as("total"))
        .sort($"total".desc)
        .where($"total" >= 20)
        .withColumn("psa", $"total" / 2)
        .show(false)
    } finally {
      // Always release the local SparkContext, even if the job fails.
      spark.stop()
    }
  }
}
