package com.jscloud.sparksql.coffee

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
 * Batch job: reads the "Coffee Chain" CSV, aggregates total Marketing spend
 * per product via Spark SQL, and writes the result (sorted descending) back
 * out as a header-bearing CSV directory.
 *
 * Runs locally on a single core (`local[1]`) with adaptive query execution
 * enabled. Input/output paths are currently hard-coded Windows paths —
 * NOTE(review): consider passing them via `args` for portability.
 */
object CoffinChainCSV {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    conf.setAppName("SparkSQLOpt")
    conf.setMaster("local[1]")
    conf.set("spark.sql.adaptive.enabled", "true")

    val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    // Ensure the session (and its underlying SparkContext) is always
    // released, even if the read/query/write below throws.
    try {
      // Read the CSV into a DataFrame; infer column types from the data
      // and treat the first row as a header.
      val df = spark.read.options(
          Map("inferSchema" -> "true", "delimiter" -> ",", "header" -> "true"))
        .csv("D:\\JSProjects\\jsCloud-bigdata-app\\sparkapp\\src\\main\\resources\\Coffee Chain.csv")

      // Register a temporary view so the data can be queried with SQL.
      df.createOrReplaceTempView("coffee")

      // Total Marketing spend per product, highest first.
      // (Spark SQL identifiers are case-insensitive by default, so
      // `product` matches the CSV's header column.)
      val b = spark.sql("select product,sum(Marketing) as number from coffee group by product order by number desc")

      // Persist the aggregated result as CSV with a header row.
      b.write.option("header", true).csv("file:///D:\\JSProjects\\jsCloud-bigdata-app\\datas\\coffee-out1")
    } finally {
      // Previously missing: without this the Spark UI/threads stay alive
      // after the job completes.
      spark.stop()
    }
  }

}