package com.o2o.cleaning.month.platform.ebusiness_plat.dazhongdp

import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Steps: 1. Extract the records that need re-crawling and hand them to the crawling team.
  *        2. Merge the two parts of the data.
  *        3. Join this month's data with last month's on monthly sales: subtract where
  *           matched; treat unmatched rows as new additions.
  *
  * @author: gaoyadi
  * @Date: 2018/6/21 10:20
  * @Description: Variables to adjust before each run: month, flag, timeStamp, collection, address.
  *    Step 1: set the flag and the data-collection name, pull the data to OBS and extract the
  *            records that need re-crawling; after the crawling team finishes, run step 2 (cleaning).
  *    When flag is "bu": back up the full raw data and extract the re-crawl records;
  *            collection is then the full collection name, mt_tg_detail.
  *    When flag is "all": back up the re-crawled data and run the cleaning;
  *            collection is then the full collection name, mt_tg_detail_bu.
  * @Modify By:
  */
object DaZhongdpTest1 {

  // ---------- mode switch: "bu" = extract re-crawl data, "all" = full computation ----------
//  var flag = "bu"
  var flag = "all"

  // Platform short name (used in the OBS paths below).
  var platform = "dzdp"
  // Current processing period.
  val year = 2020
  var month = "7"
  var lastMonth = "6"
  var last2Month = "5"
  // Fixed timestamp for this month, taken from the shared time-constants table.
  var timeStamp = TimesYearAll.TIME202007
  // MongoDB database / collection names for the two data parts.
  var database = "Dzdp"
  var collection_0 = "dzdp_app_shop_info_2007"
  var collection_1 = "dzdp_app_shop_info_two_2007"

  // OBS landing paths for the raw (or re-crawled) monthly data.
  var sourcePart0Path = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/part1"
  var sourcePart1Path = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/part2"

  /**
    * Entry point: reads the two monthly JSON dumps from OBS, computes monthly
    * sell counts / sales amounts via [[caculate]], joins the platform category
    * mapping via `DaZhongdp.dzdpCate`, and prints summary statistics.
    *
    * @param args unused
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("DZDP")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // SECURITY NOTE(review): object-storage access/secret keys are hardcoded in
    // source; they should be moved to configuration/environment and rotated.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")

    // Step 1 (run with flag = "bu"): dump the MongoDB collections to OBS as JSON.
//    saveToObs(sc,spark,sourcePart0Path,database,collection_0)
//    saveToObs(sc,spark,sourcePart1Path,database,collection_1)
//    println("===落地JSON成功===")

    // Read both JSON dumps back from OBS.
    val fromDF: DataFrame = spark.read.json(sourcePart0Path)
    val toDF: DataFrame = spark.read.json(sourcePart1Path)

    // Step 2: compute monthly sell counts and sales amounts.
    println("======计算销量======")
    val infoDF = caculate(spark, sc, fromDF, toDF)

    // Step 3: join the platform category mapping.
    println("======关联分类======")
    val df_cate = DaZhongdp.dzdpCate(spark, infoDF)

    // createOrReplaceTempView replaces the registerTempTable API deprecated in Spark 2.0.
    df_cate.createOrReplaceTempView("infotab")

    println("=======关联到的总额=======")
    spark.sql(
      """
        |select
        |count(1),
        |sum(sellCount) as sellCount,
        |sum(sales) as salesAmount
        |from
        |infotab
      """.stripMargin).show()

    println("10028美食分类的量")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(sales),
        |sum(sellCount)
        |from
        |infotab
        |where firstCategoryId='10028'
      """.stripMargin).show()

    println("所有分类的量")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(sales),
        |sum(sellCount),
        |firstCategoryId
        |from
        |infotab
        |group by firstCategoryId
        |order by ct desc
      """.stripMargin).show()
  }

  /**
    * Computes the monthly sell count and sales amount.
    *
    * Rows of `toDF` (current snapshot) that match `fromDF` (previous snapshot)
    * on `good_id` get `sell = (a.sellCount - b.sellCount) * 6` and
    * `sales = (a.sellCount - b.sellCount) * priceText * 6.0`; the old
    * `sellCount` column is dropped and `sell` is renamed to `sellCount`.
    * Totals for the unmatched ("new") rows are only printed, not returned.
    *
    * @param spark  active session
    * @param sc     unused, kept for call-site compatibility
    * @param fromDF previous snapshot
    * @param toDF   current snapshot
    * @return matched rows with recomputed `sellCount` and `sales`
    */
  def caculate(spark: SparkSession, sc: SparkContext, fromDF: DataFrame, toDF: DataFrame): DataFrame = {
    // createOrReplaceTempView replaces the registerTempTable API deprecated in Spark 2.0.
    fromDF.createOrReplaceTempView("fromtab")
    toDF.createOrReplaceTempView("totab")

    println("=======关联到的总额=======")
    val infoDF = spark.sql(
      """
        |select
        |a.*,
        |cast((a.sellCount-b.sellCount)*6 as bigint) as sell,
        |CAST((a.sellCount-b.sellCount)*a.priceText*6.0 as decimal(20,2)) as sales
        |from
        |totab a
        |left join
        |fromtab b
        |on a.good_id=b.good_id
        |where b.good_id is not null
      """.stripMargin).drop("sellCount").withColumnRenamed("sell", "sellCount")

    println("=======关联不到的总额=======")
    spark.sql(
      """
        |select
        |count(1),
        |sum(sellCount) as sellCount,
        |sum(sales) as salesAmount
        |from(
        |select
        |a.*,
        |cast(a.sellCount*a.priceText as decimal(20,2)) as sales
        |from
        |totab a
        |left join
        |fromtab b
        |on a.good_id=b.good_id
        |where b.good_id is null
        |)
        |
      """.stripMargin).show()

    infoDF
  }

  /**
    * Dumps a MongoDB collection to OBS as one JSON-lines file, flattening the
    * first element of the `flavors` array (platform category) into top-level
    * fields and dropping the Mongo `_id`.
    *
    * @param sc         Spark context used to load the Mongo RDD
    * @param spark      unused, kept for call-site compatibility
    * @param savepath   OBS target path
    * @param database   source MongoDB database name
    * @param collection source MongoDB collection name
    */
  def saveToObs(sc: SparkContext, spark: SparkSession, savepath: String, database: String, collection: String): Unit = {
    // SECURITY NOTE(review): credentials are hardcoded in the URI; move to config.
    // Fixed: the URI previously contained a stray space after '@', which made the host invalid.
    val detail_from: ReadConfig = ReadConfig(Map(
      "spark.mongodb.input.uri" -> "mongodb://ob:O2Odata123!@192.168.0.149:27017/admin"
      , "spark.mongodb.input.database" -> s"${database}"
      , "spark.mongodb.input.collection" -> s"${collection}"))
    val from_rdd = MongoSpark.load(sc, detail_from)

    val values = from_rdd.map(line => {
      val nObject: JSONObject = JSON.parseObject(line.toJson())

      // Flatten the first entry of "flavors" (platform category) into top-level
      // fields. Guard against a missing/empty array: previously this threw a
      // NullPointerException and killed the whole job for such documents.
      val flavors = nObject.getJSONArray("flavors")
      if (flavors != null && !flavors.isEmpty) {
        val first = flavors.getJSONObject(0)
        nObject.put("rootCategoryId", first.getString("rootCategoryId"))
        nObject.put("rootCategoryName", first.getString("rootCategoryName"))
        nObject.put("categoryId", first.getString("categoryId"))
        nObject.put("categoryName", first.getString("categoryName"))
      }
      nObject.remove("_id")
      nObject.toString
    })
    // Single partition so the dump lands as one file per collection.
    values.repartition(1).saveAsTextFile(savepath)
  }
}

