package com.dyj.ads
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
 * Batch job: computes the ketone-body (tongti) abnormality count over the
 * urine-routine DWS table for one partition day and writes the result to the
 * ADS layer as CSV.
 *
 * Usage: spark-submit ... ads_e_mz_ave7662_tt <ds>
 *   ds — partition date (yyyy-MM-dd or yyyyMMdd, matching the source table's `ds`)
 */
object ads_e_mz_ave7662_tt {
  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of ArrayIndexOutOfBoundsException.
    require(args.nonEmpty, "missing required argument: ds (partition date)")
    val ds: String = args(0)

    val sparkSession: SparkSession = SparkSession.builder()
      .appName("酮体指标统计")
      .enableHiveSupport()
      // Tiny per-day output; one shuffle partition yields a single result file.
      .config("spark.sql.shuffle.partitions", 1)
      .getOrCreate()

    import sparkSession.implicits._
    import org.apache.spark.sql.functions._

    try {
      sparkSession.sql("use bigdata03_dws")

      // Pull only the columns needed for the metric, restricted to one partition.
      val ttDF: DataFrame = sparkSession.sql(
        s"""
          |select
          |baoGaoBianHao,
          |tongti,
          |baoGaoRiQi
          |from
          |bigdata03_dws.dws_e_mz_ave7662niaochanggui
          |where
          |ds='${ds}'
          |""".stripMargin)

      // Global count of rows whose tongti flags a significant elevation.
      // `.otherwise(0)` guarantees the sum is 0 (not NULL) when no row matches.
      // `.over()` with an empty window attaches the same total to every row.
      val abnormalCount = sum(when($"tongti" === "酮体水平显著升高", 1).otherwise(0)).over()

      ttDF.withColumn("tt_ycgs", abnormalCount)
        .select($"baoGaoBianHao", $"tongti", $"baoGaoRiQi", $"tt_ycgs")
        .write
        // Overwrite makes re-runs for the same ds idempotent instead of
        // failing with "path already exists".
        .mode("overwrite")
        .format("csv")
        .save("/daas/motl/bigdata03/ads/ads_e_mz_ave7662_tt/ds=" + ds)
    } finally {
      // Release cluster resources even if the query or write fails.
      sparkSession.stop()
    }
  }
}
