package com.o2o.cleaning.month.platform.ebusiness_plat.meituanwm

import com.o2o.utils.times.TimesYearAll
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

object MeiTuanWM {

  // NOTE(review): these are never reassigned and should be `val`s; kept as `var`s
  // only to preserve the object's public interface for any external callers.
  var platform = "MT" // platform name
  var year = "2021" // year and month of the batch being processed
  var month = "11"
  var timeStamp = TimesYearAll.TIME202111 // fixed timestamp for each month

  // Source collection name, e.g. "mtwm_sc_good_2111". The two-digit year is derived
  // from `year` instead of being hard-coded ("21") so the name stays correct when
  // `year` is updated for a new batch. NOTE: computed once at object init — changing
  // `year`/`month` afterwards will NOT update it.
  val readCollection = s"mtwm_sc_good_${year.takeRight(2)}${month}"
  // OBS (s3a) path where the pulled source data for this month is stored.
  val resultUrl = s"s3a://o2o-sourcedata-${year}/obs-source-${year}/${month}/${readCollection}/"

  /**
   * Entry point: reads this month's ORC dump from OBS, aggregates sell count and
   * sales amount per shop for products whose title contains both '云南白药' and
   * '牙膏', and writes the result as a single header-ed CSV to a local path.
   */
  def main(args: Array[String]): Unit = {

    // Spark session configuration (local mode, Kryo serialization).
    val spark = SparkSession.builder()
      .master("local[*]")
      .config("spark.debug.maxToStringFields", "10000")
      .appName("MongoSparkConnectorIntro")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // OBS (s3a) access configuration.
    // SECURITY(review): access/secret keys are hard-coded in source control.
    // Move them to environment variables / a credentials provider
    // (e.g. fs.s3a.aws.credentials.provider) and rotate these keys.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")

    //    spark.read.orc(resultUrl).printSchema()
    // createOrReplaceTempView replaces the Spark-2.0-deprecated registerTempTable;
    // behavior is identical (session-scoped temp view "t1").
    spark.read.orc(resultUrl).createOrReplaceTempView("t1")
    // Aggregate per shop; `priceText` is implicitly cast to a numeric type by
    // Spark SQL in the multiplication. Output is coalesced to one CSV part file.
    spark.sql(
      """
        |select shopName
        |,sum(sellCount)sellCount
        |,sum(sellCount * priceText)salesAmount
        |from t1
        |where title like '%云南白药%' and title like '%牙膏%' and sellCount > 0
        |group by shopName
        |order by salesAmount desc
        |""".stripMargin).repartition(1).write.mode("overwrite").option("header", "true").csv("D:\\test1")

  }

}