package com.o2o.cleaning.month.platform.ebusiness_plat.dazhongdp

import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.o2o.cleaning.month.platform.ebusiness_plat.dazhongdp.DaZhongdp.{address, resultPath}
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * 步骤：1、提取补采信息给采集组
  *      2、合并两部分数据
  *      3、本月数据与上月数据关联  月销量  关联到的相减     关联不到的作为新增
 *
  * @author: gaoyadi
  * @Date: 2018/6/21 10:20
  * @Description:执行时需要修改的变量 month  flag  timeStamp  collection  address
  *    第一步：修改flag参数和数据集合名称，先拉取数据到OBS 同时把需要补采的数据提取出来 等待采集组补采完后 再执行第二步清洗
  *    当flag为bu的时候，原始全量数据备份 以及需要补采的数据提取  此时 collection为全量集合的名称 mt_tg_detail
  *    当flag为all的时候，补采好的数据备份以及数据清洗等  此时 collection为全量集合的名称 mt_tg_detail_bu
  * @Modify By:
  */
/**
  * Monthly cleaning job for Dianping ("dzdp") group-buy goods data.
  *
  * Pipeline (see `main`):
  *   1. (optional, commented out) dump the month's MongoDB collections to OBS via [[saveToObs]];
  *   2. read the two OBS JSON dumps (previous snapshot + current snapshot);
  *   3. join them on `good_id` in [[caculate]] and derive this month's sellCount;
  *   4. enrich with timestamp/platform columns, category and address via `DaZhongdp` helpers;
  *   5. write the result JSON to `DaZhongdp.resultPath` and print summary totals.
  *
  * NOTE(review): this is a run-book style job — the `var` config fields at the top
  * (flag / month / timeStamp / collection names / paths) are edited by hand each month.
  */
object DaZhongdpTest {

  // ---------- mode switch: "bu" = extract rows needing supplemental crawling, "all" = full calculation ----------
//  var flag = "bu"
  var flag = "all"

  // platform short name used in OBS paths
  var platform = "dzdp"
  // year / month being processed; lastMonth / last2Month are only referenced by the
  // commented-out month-over-month shop comparison further down
  val year = 2020
  var month = "7"
  var lastMonth = "6"
  var last2Month = "5"
  // fixed per-month timestamp tag stamped onto every output row (see withColumn("timeStamp", ...))
  var timeStamp = TimesYearAll.TIME202007  // fixed timestamp for the month
  // MongoDB database and collection names holding this month's raw crawl
  var database = "Dzdp"
  var collection_0 = "dzdp_app_shop_info_two_2007"
  var collection_1 = "dzdp_app_shop_info_two_2007_backup"

  // OBS paths of the raw data (supplemental crawl or current-month dump);
  // part 0 = the earlier/backup snapshot, part 1 = the later snapshot (see caculate join)
  var sourcePart0Path = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/1"
  var sourcePart1Path = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/2"

  // output path for newly-added goods (rows in the current snapshot with no match in the
  // backup snapshot); only used by the commented-out "newAdd" branch in caculate
  var newAddPath = s"s3a://o2o-dataproces-group/zsc/2020/${month}/${platform}/newAdd"

  /**
    * Entry point: runs steps 2-5 of the pipeline described on the object scaladoc.
    * Side effects: reads/writes OBS (S3A), prints diagnostics, writes result JSON
    * to `DaZhongdp.resultPath`, then stops the SparkContext.
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
//                    .master("local[*]")
      .appName("DZDP")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // NOTE(review): OBS access/secret keys are hard-coded in source — these are live-looking
    // credentials and should be moved to configuration / environment and rotated.
    var sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")

    // Step 1 (run once per month, then comment out): dump MongoDB collections to OBS as JSON
//    saveToObs(sc,spark,sourcePart0Path,database,collection_0)
//    saveToObs(sc,spark,sourcePart1Path,database,collection_1)
//    println("===落地JSON成功===")

    // Step 2: read the two OBS JSON dumps. The commented .where filters would drop rows
    // whose sell counters were never captured (-1 sentinel); filtering is instead done
    // per-counter inside caculate.
    val fromDF: DataFrame = spark.read.json(sourcePart0Path)//.where("sellCount != -1").where("sellCount180 != -1").where("sellCount360 != -1")

    // cached because caculate reuses it in the join (and the dead newAdd branch would too)
    fromDF.cache()

    val toDF: DataFrame = spark.read.json(sourcePart1Path)//.where("sellCount != -1").where("sellCount180 != -1").where("sellCount360 != -1")

    toDF.cache()

    // Step 3: compute monthly sales by diffing the two snapshots
    println("======计算销量======")
    val infoDF = caculate(spark,sc,fromDF,toDF)

    // Step 4a: stamp fixed metadata columns onto every row
    var df_label = infoDF
      .withColumn("timeStamp", lit(s"${timeStamp}")).withColumn("platformName", lit("大众点评"))
      .withColumn("platformId", lit("24"))

    // Step 4b: category and address enrichment via the shared DaZhongdp helpers
    val cateDF: DataFrame = DaZhongdp.dzdpCate(spark,df_label)

    val df_addr = DaZhongdp.dzdpAddress(spark, cateDF, address)

    // Dead code: month-over-month shop-level comparison against last month's cleaned output.
    // Kept for reference; note it registers "lastmonth"/"lastshop"/"thisshop" temp tables and
    // its "thisshop" query reads the "info" table, which is only registered further below.
   /* val lastmonthdf = spark.read.json("s3a://o2o-dataproces-group/chen_lixiu/2020/6/dzdp/good/")

    lastmonthdf.cache()

    lastmonthdf.registerTempTable("lastmonth")

    val lastShopDf = spark.sql(
      """
        |select
        |shopId,
        |shopName,
        |count(1) ct,
        |sum(sellCount) sell,
        |sum(salesAmount) as sales
        |from
        |lastmonth
        |group by shopId,shopName
        |order by sales desc
      """.stripMargin)

    lastShopDf.registerTempTable("lastshop")

    val thisShopDf = spark.sql(
      """
        |select
        |shopId,
        |shopName,
        |count(1) cts,
        |sum(sellCount) sells,
        |sum(salesAmount) as saless
        |from
        |info
        |group by shopId,shopName
        |order by saless desc
      """.stripMargin)

    thisShopDf.registerTempTable("thisshop")

    val shopres = spark.sql(
      """
        |select
        |a.shopId,
        |a.shopName,
        |a.cts,
        |b.ct,
        |a.sells,
        |b.sell,
        |a.saless,
        |b.sales
        |from
        |thisshop a
        |left join
        |lastshop b
        |on a.shopId=b.shopId
        |where b.shopId is not null
      """.stripMargin)*/

//    shopres.repartition(1).write.csv("s3a://o2o-dataproces-group/zsc/2020/6/dzdp/shop/")


    // Step 5: land the cleaned goods data (single output file) to resultPath
    df_addr.repartition(1).write.json(resultPath)

    // NOTE(review): registerTempTable is deprecated since Spark 2.0 — the non-deprecated
    // equivalent is createOrReplaceTempView (behavior-identical here).
    df_addr.registerTempTable("info")

    // Print final sanity totals: row count, total sellCount, total salesAmount
    spark.sql(
      """
        |select
        |count(1),
        |sum(sellCount),
        |sum(salesAmount)
        |from
        |info
      """.stripMargin).show()




    sc.stop()
  }


  /**
    * Derives this month's sales figures by joining the current snapshot against the
    * earlier/backup snapshot on `good_id`.
    *
    * NOTE(review): method name "caculate" is a typo for "calculate" — left as-is since it
    * is part of the object's public interface.
    *
    * Live behavior: keeps only goods present in BOTH snapshots (inner-join semantics via
    * left join + `b.good_id is not null`), then for rows whose cumulative counter is valid
    * (`sellCount != -1`) computes monthly sellCount as `(sell - sellCount) * 3` and keeps
    * only positive results. The half-year (/6) and full-year (/12) branches and the
    * salesAmount computation are currently commented out, so only `lejiRes` is returned.
    *
    * @param spark  active session used for SQL over the registered temp tables
    * @param sc     unused here; kept for signature compatibility with call sites
    * @param fromDF earlier/backup snapshot (registered as "fromtab"; supplies sell/sell180/sell360)
    * @param toDF   current snapshot (registered as "totab"; all its columns are carried through)
    * @return matched rows with sellCount replaced by the derived monthly figure (> 0 only)
    */
  def caculate(spark:SparkSession,sc:SparkContext,fromDF:DataFrame,toDF:DataFrame): DataFrame ={
    fromDF.registerTempTable("fromtab")
    toDF.registerTempTable("totab")

    println("=======关联到的总额=======")
    // matched goods: current-snapshot row plus the backup snapshot's three sell counters
    val infoDF = spark.sql(
      """
        |select
        |a.*,
        |b.sellCount as sell,
        |b.sellCount180 as sell180,
        |b.sellCount360 as sell360
        |from
        |totab a
        |left join
        |fromtab b
        |on a.good_id = b.good_id
        |where b.good_id is not null
      """.stripMargin)


    // Dead code: extraction of goods with no match in the backup snapshot ("new adds"),
    // written to newAddPath when enabled.
//    println("=======关联不到的总额=======")
//    val newAdd = spark.sql(
//      """
//        |select
//        |a.*,
//        |b.sellCount as sell,
//        |b.sellCount180 as sell180,
//        |b.sellCount360 as sell360
//        |from
//        |totab a
//        |left join
//        |fromtab b
//        |on a.good_id = b.good_id
//        |where b.good_id is null
//      """.stripMargin)

//    newAdd.repartition(1).write.orc(newAddPath)

    // cached: infoDF is scanned three times below (one filter per sell counter)
    infoDF.cache()

    // -1 is the "counter not captured" sentinel; split by which counter is usable
    val leijiDF: Dataset[Row] = infoDF.where("sellCount != -1")

    val halfYearDF: Dataset[Row] = infoDF.where("sellCount180 != -1")

    val allYearDF: Dataset[Row] = infoDF.where("sellCount360 != -1")

    println("===累计销量"+leijiDF.count())
    println("===半年销量"+halfYearDF.count())
    println("===全年销量"+allYearDF.count())

    // 1. cumulative counter → monthly sellCount (the only live branch)
    //    formula: (backup cumulative - current cumulative) * 3
    //    NOTE(review): the *3 multiplier and the direction of the subtraction are
    //    business-specific — presumably snapshot ordering makes sell >= sellCount
    //    for growing counters; confirm with the data owner.

    leijiDF.registerTempTable("leijitab")

    val lejiRes = spark.sql(
      """
        |select
        |*,
        |cast((sell-sellCount)*3 as bigint) as sells
        |--cast((sell-sellCount)*3*priceText as decimal(20,2)) as salesAmount
        |from
        |leijitab
      """.stripMargin).drop("sellCount").drop("sellCount180").drop("sellCount360").drop("sell").drop("sell180").drop("sell360")
        .withColumnRenamed("sells","sellCount").where("sellCount > 0")//.where("salesAmount > 0")



    /*    lejiRes.registerTempTable("leiji")

        println("===leiji data===")
        spark.sql(
          """
            |select
            |*
            |from
            |leiji
          """.stripMargin).show(20)

        println("累计总额")
        spark.sql(
          """
            |select
            |count(1),
            |sum(sellCount),
            |sum(salesAmount)
            |from
            |leiji
          """.stripMargin).show()
        println("累计-店铺数")
        spark.sql(
          """
            |select
            |count(distinct shopId)
            |from
            |leiji
          """.stripMargin).show()

        println("leiji-TOP店铺")
        spark.sql(
          """
            |select
            |shopId,
            |shopName,
            |sum(sellCount),
            |sum(salesAmount) as sales
            |from
            |leiji
            |group by shopId,shopName
            |order by sales desc
            |limit 20
          """.stripMargin).show()

        println("TOP分类")
        spark.sql(
          """
            |select
            |categoryId,
            |categoryName,
            |count(1),
            |sum(sellCount),
            |sum(salesAmount) as sales
            |from
            |leiji
            |group by categoryId,categoryName
            |order by sales desc
            |limit 30
          """.stripMargin).show(30)

        println("TOP分类")
        spark.sql(
          """
            |select
            |categoryId,
            |categoryName,
            |count(1) ct,
            |sum(sellCount),
            |sum(salesAmount) as sales
            |from
            |leiji
            |group by categoryId,categoryName
            |order by ct desc
            |limit 30
          """.stripMargin).show(30)


        //2.半年销量计算销售额

        halfYearDF.registerTempTable("halfyeartab")

        val halfyearRes = spark.sql(
          """
            |select
            |*,
            |cast((sell180-sellCount180)/6 as bigint) as sells,
            |cast((sell180-sellCount180)/6*priceText as decimal(20,2)) as salesAmount
            |from
            |halfyeartab
          """.stripMargin).drop("sellCount").drop("sellCount180").drop("sellCount360").drop("sell").drop("sell180").drop("sell360")
          .withColumnRenamed("sells","sellCount")

        halfyearRes.registerTempTable("halfyear")
        println("半年总额")
        spark.sql(
          """
            |select
            |count(1),
            |sum(sellCount),
            |sum(salesAmount)
            |from
            |halfyear
          """.stripMargin).show()

        //3.全年销量计算销售额

        allYearDF.registerTempTable("allyeartab")

        val allyearRes = spark.sql(
          """
            |select
            |*,
            |cast((sell360-sellCount360)/12 as bigint) as sells,
            |cast((sell360-sellCount360)/12*priceText as decimal(20,2)) as salesAmount
            |from
            |allyeartab
          """.stripMargin).drop("sellCount").drop("sellCount180").drop("sellCount360").drop("sell").drop("sell180").drop("sell360")
          .withColumnRenamed("sells","sellCount")

        allyearRes.registerTempTable("allyear")
        println("全年总额")
        spark.sql(
          """
            |select
            |count(1),
            |sum(sellCount),
            |sum(salesAmount)
            |from
            |allyear
          """.stripMargin).show()

        val allRes: DataFrame = lejiRes.union(halfyearRes).union(allyearRes)*/

    lejiRes
  }

  /**
    * Dumps one MongoDB collection to OBS as one JSON document per line,
    * flattening the platform-category fields from the first element of the
    * `flavors` array onto the top level and dropping the Mongo `_id`.
    *
    * NOTE(review): the Mongo URI contains a space after the '@' — this looks
    * accidental and would normally make the host unparseable; verify against
    * the environment where this job actually runs. The password is also
    * hard-coded in source and should be externalized.
    *
    * NOTE(review): flavors.getJSONObject(0) throws if a document has a missing
    * or empty `flavors` array — presumably the crawler guarantees it; confirm.
    *
    * @param sc         SparkContext used by the Mongo connector to load the RDD
    * @param spark      unused here; kept for signature compatibility with call sites
    * @param savepath   OBS target path (written as a single text file via repartition(1))
    * @param database   source MongoDB database name
    * @param collection source MongoDB collection name
    */
  def saveToObs(sc: SparkContext,spark: SparkSession,savepath:String,database:String,collection:String){
    val detail_from: ReadConfig = ReadConfig(Map(
      "spark.mongodb.input.uri" -> "mongodb://ob:O2Odata123!@ 192.168.0.149:27017/admin"
      , "spark.mongodb.input.database" -> s"${database}"
      , "spark.mongodb.input.collection" -> s"${collection}"))
    val from_rdd = MongoSpark.load(sc,detail_from)

    val values = from_rdd.map(line=>{
      val nObject: JSONObject = JSON.parseObject(line.toJson())

      // flatten the fields inside flavors[0]: platform category hierarchy
      val flavors = nObject.getJSONArray("flavors")

      val rootCategoryId = flavors.getJSONObject(0).getString("rootCategoryId")
      val rootCategoryName = flavors.getJSONObject(0).getString("rootCategoryName")
      val categoryId = flavors.getJSONObject(0).getString("categoryId")
      val categoryName = flavors.getJSONObject(0).getString("categoryName")

      nObject.put("rootCategoryId",rootCategoryId)
      nObject.put("rootCategoryName",rootCategoryName)
      nObject.put("categoryId",categoryId)
      nObject.put("categoryName",categoryName)
      // _id is a Mongo-internal ObjectId wrapper; drop it so the JSON stays flat
      nObject.remove("_id")
      nObject.toString
    })
    // single output file per collection dump
    values.repartition(1).saveAsTextFile(savepath)
  }
}

