package com.o2o.cleaning.month.platform.ebusiness_plat.meituan_tg

import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.mongodb.spark.rdd.MongoRDD
import com.o2o.cleaning.month.platform.ebusiness_plat.meituan_tg.Mttg._
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.bson.Document

/**
  * Steps: 1. Extract the records that need re-collection and hand them to the collection team
  *        2. Merge the two data parts
  *        3. Join this month's data with last month's on monthly sales: matched rows get the
  *           sales difference, unmatched rows are treated as newly added goods
 *
  * @author: gaoyadi
  * @Date: 2018/6/21 10:20
  * @Description: variables that must be edited before each run: month, flag, timeStamp, collection, address
  *    Step 1: set the flag and the collection name, pull the raw data to OBS and at the same time
  *            extract the records that need re-collection; after the collection team finishes, run
  *            step 2 (the cleaning pass).
  *    When flag == "bu": back up the full raw data and extract the re-collection records;
  *                       collection is the name of the full collection, mt_tg_detail
  *    When flag == "all": back up the re-collected data and run the cleaning;
  *                        collection is the name of the full collection, mt_tg_detail_bu
  * @Modify By:
  */
object Mttg_06 {

  //---------- re-collection phase: "bu"  ------------ computation phase: "all" ----
//  var flag = "bu"
  var flag = "all"

  // platform name
  var platform = "mttg"
  // current processing month
  val year = 2020
  var month = 7
  var lastMonth = 6
  var last_lastMonth = 5
  var timeStamp = TimesYearAll.TIME202007  // fixed per-month timestamp
  // database name in MongoDB
  var database = "MT"
  var collection_0 = "mt_tg_detail"
//  var collection_0 = "mt_tg_detail_v5"
  var collection_1 = "mt_tg_detail_2006_1"

  // path of the re-collected or current-month raw data
  // NOTE(review): `collection` is not declared in this object — presumably it is provided by
  // the `Mttg._` wildcard import; confirm, otherwise this line does not compile.
  var sourcePathPart = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/${collection}"
  // all current-month data (both the original mt_tg_detail part and the re-collected mt_tg_detail_bu part)
  var sourcePart1Path = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/part1"
  var sourcePart2Path = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/part2"
  // path of the records that need re-collection (handed to the collection team)
  var buPath = s"s3a://o2o-dataproces-group/chen_lixiu/2020/${month}/sourceBuCai/${platform}/bu"
  // raw-data paths of last month and the month before last
  var lastMonthPath = s"s3a://o2o-sourcedata/obs-source-2020/${lastMonth}/${platform}/*/*"
  var last_lastMonthPath = s"s3a://o2o-sourcedata/obs-source-2020/${last_lastMonth}/${platform}/*/*"

  // "good" result file paths of the previous months  o2o-dataproces-group/chen_lixiu/2019/10/mttg/good/
  var lastGoodPath1 = s"s3a://dws-data/g_data/${year}/${month-1}/meituan_tg/" // last month's result
  var lastGoodPath2 = s"s3a://dws-data/g_data/${year}/${month-2}/meituan_tg/" // result of 2 months ago
  var lastGoodPath3 = s"s3a://dws-data/g_data/${year}/${month-3}/meituan_tg/" // result of 3 months ago
  var lastGoodPath4 = s"s3a://dws-data/g_data/${year}/${month-4}/meituan_tg/" // result of 4 months ago
  var lastGoodPath5 = s"s3a://dws-data/g_data/${year-1}/${month+12-5}/meituan_tg/" // result of 5 months ago (wraps into the previous year)

  // address table path
  var address = s"s3a://o2o-dimension-table/address_table/address_table_2020/${month}/address_platform/meituan_tg_address_2020_${month}/*"
  // third-level category-id mapping path
  var catePath = "s3a://o2o-dimension-table/category_table/cate/cate0401/mttg/categoryId/*"
  // cleaned-goods result paths
  var resultPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_all"
  var sellPath = s"s3a://o2o-dataproces-group/chen_lixiu/${year}/${month}/${platform}/sell_good"
  var sell90Path = s"s3a://o2o-dataproces-group/chen_lixiu/${year}/${month}/${platform}/sell90_good"
  var sell180Path = s"s3a://o2o-dataproces-group/chen_lixiu/${year}/${month}/${platform}/sell180_good"
  // newly-added goods path
  var newAddPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/newAdd"
  // extracted shop paths
  var shopPath_json = s"s3a://o2o-sourcedata/obs_result_shop/${year}/${month}/${platform}"
  var shopPath_orc = s"s3a://dws-data/g_shop/${year}/${month}/meituan_tg"
  // validation path
  var strPath = "s3a://o2o-dataproces-group/chen_lixiu/"

  val mongoArr = ("MT,mt_tg_detail_from","MT,mt_tg_detail_to")


  /**
    * Entry point: builds a local SparkSession, configures S3A (OBS) access, then dumps the
    * configured MongoDB collection to OBS via [[saveToObs]]. The downstream cleaning steps
    * are currently commented out and kept for reference.
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
                    .master("local[*]")
      .appName("MTTG")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // SECURITY(review): hard-coded OBS access/secret keys committed to source — move them to
    // configuration or a credential store and rotate the exposed pair.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")
    //MT.mt_tg_detail_2006_0  first collection run
    //MT.mt_tg_detail_2006_1  second collection run


    // dump MongoDB data ----> OBS
    saveToObs(sc,spark,sourcePathPart,database,collection_0)
//    saveToObs(sc,spark,sourcePart2Path,database,collection_1)
//    println("===落地ORC成功===")

    // read the data back from OBS
//    val fromDF: DataFrame = spark.read.json(sourcePart1Path)
//    fromDF.registerTempTable("fromtab")
//
//    val toDF: DataFrame = spark.read.json(sourcePart2Path)
//    toDF.registerTempTable("totab")

    /*spark.sql(
      """
        |
        |select
        |sum(eqt),
        |sum(gqt),
        |sum(lqt)
        |from
        |(
        |select
        |case
        |when (a.sellCount-b.sellCount)=0 then 1 else 0 end eqt,
        |case
        |when (a.sellCount-b.sellCount)>0 then 1 else 0 end gqt,
        |case
        |when (a.sellCount-b.sellCount)<0 then 1 else 0 end lqt
        |
        |from
        |totab a
        |left join
        |fromtab b
        |on a.good_id = b.good_id
        |)
      """.stripMargin
    ).show()*/


    // matched goods (joined on good_id)
  /*  val result: DataFrame = spark.sqlContext.sql(
      """
        |select
        |b.*,
        |cast((b.sellCount- a.sellCount)*6 as bigint) as sells
        |from
        |totab b
        |left join
        |fromtab a
        |on a.good_id = b.good_id
        |where a.good_id is not null
      """.stripMargin)
      .drop("sellCount").withColumnRenamed("sells","sellCount")//.
//      where("sellCount>0").where("priceText>0")
      .dropDuplicates("good_id")

    result.registerTempTable("result_tab")

    spark.sql(
      """
        |
        |select
        |count(1),
        |sum(sellCount),
        |sum(cast(sellCount * priceText as decimal(20,2))) salesAmount
        |from
        |result_tab
        |
        |
      """.stripMargin).show()

    println("关联到：")
    result.show()
    println("关联到的数量："+result.count())

    //关联不到的
    val result1: DataFrame = spark.sqlContext.sql(
      """
        |select
        |b.*
        |from
        |totab b
        |left join
        |fromtab a
        |on a.good_id = b.good_id
        |where a.good_id is null
      """.stripMargin).
      where("sellCount>0").where("priceText>0")
      .dropDuplicates("good_id")


    println("关联不到：")
    result1.show()
    println("关联不到的数量："+result1.count())
    result1.write.orc(newAddPath)


    //第二步：打标签
    var df_label = result
      .withColumn("timeStamp", lit(s"${timeStamp}"))
      .withColumn("platformName", lit("美团团购"))
      .withColumn("platformId", lit("23"))
    //第三步：关联分类
    val df_cate = mttgCate(spark, df_label)
    //      第四步：关联地址
    val df_addr = mttgAddress(spark, df_cate, address)
    //      第五步：商品数据落地
    df_addr.repartition(1).write.orc(resultPath)*/

  }

  /**
    * Dumps one MongoDB collection to OBS as JSON files.
    *
    * Each document is flattened: the four category fields of the FIRST element of its
    * `flavors` array are lifted to top level, the Mongo `_id` is removed, and the resulting
    * JSON strings are re-parsed by Spark and written to `savepath`.
    *
    * @param sc         active SparkContext (carries the S3A credentials configured in main)
    * @param spark      SparkSession used to parse the JSON strings before writing
    * @param savepath   OBS output directory
    * @param database   MongoDB database name
    * @param collection MongoDB collection name
    */
  def saveToObs(sc: SparkContext,spark: SparkSession,savepath:String,database:String,collection:String){
    // BUGFIX: the connection URI previously contained a space after '@'
    // ("…!@ 192.168.0.149…"), which yields an invalid host name in the MongoDB
    // connection string; the space has been removed.
    // SECURITY(review): credentials are hard-coded in the URI — externalize and rotate them.
    val detail_from: ReadConfig = ReadConfig(Map(
      "spark.mongodb.input.uri" -> "mongodb://ob:O2Odata123!@192.168.0.149:27017/admin"
      , "spark.mongodb.input.database" -> s"${database}"
      , "spark.mongodb.input.collection" -> s"${collection}"))
    val from_rdd = MongoSpark.load(sc,detail_from)

    val values = from_rdd.map(line=>{
      val nObject: JSONObject = JSON.parseObject(line.toJson())

      // flatten the category fields nested inside `flavors`
      // NOTE(review): assumes every document carries a non-empty `flavors` array; a missing
      // or empty array would throw here and fail the job — confirm against the source data.
      val flavors = nObject.getJSONArray("flavors")

      val rootCategoryId = flavors.getJSONObject(0).getString("rootCategoryId")
      val rootCategoryName = flavors.getJSONObject(0).getString("rootCategoryName")
      val categoryId = flavors.getJSONObject(0).getString("categoryId")
      val categoryName = flavors.getJSONObject(0).getString("categoryName")

      nObject.put("rootCategoryId",rootCategoryId)
      nObject.put("rootCategoryName",rootCategoryName)
      nObject.put("categoryId",categoryId)
      nObject.put("categoryName",categoryName)
      nObject.remove("_id")
      nObject.toString
    })
    // NOTE: spark.read.json(RDD[String]) is deprecated since Spark 2.2 but kept for behavior parity
    spark.read.json(values).write.json(savepath)
    //values.repartition(1).saveAsTextFile(savepath)
  }
}

