package com.o2o.cleaning.month.platform.ebusiness_plat.ddmc

import com.alibaba.fastjson.{JSON, JSONObject}
import com.o2o.utils.Iargs
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import com.o2o.cleaning.month.platform.ebusiness_plat.brand_modular.brand_join_res
import com.o2o.utils.times.TimesYearAll

/**
  * Dingdong Maicai (叮咚买菜) — monthly cleaning job for the shared
  * cumulative-sales crawl.
  *
  * Source: 192.168.0.149, MongoDB collection DingDongMaiCai.ddmc_detail_2007.
  * Revenue = (last totalSell - first totalSell) * price, derived from the
  * `add_to_field` snapshot array; coupon info also lives in
  * add_to_field -> coupon.
  */
object DingDongMaiCai {

  // Platform identifier used in every storage path below
  var platform = "dingdongmc"
  // Month being processed, and the previous month
  var month = 8
  var lastMonth = 7
  // Processing year. NOTE(review): `timeStamp` is the fixed per-month
  // timestamp and MUST be kept in sync with year/month when editing
  // (here TIME202108 <-> 2021-08).
  var year = 2021
  var timeStamp = TimesYearAll.TIME202108

  // OBS (S3-compatible) path of the MongoDB-sourced crawl data
  val sourcePath = s"s3a://o2o-sourcedata-2021/obs-source-${year}/${month}/${platform}/${platform}_${year}_${month}/"
  //  val sourcePath = s"s3a://o2o-dataproces-group/zyf/obs-source-${year}/${year}/${month}/${platform}/${platform}_${year}_${month}/"

  // Category dimension table path (JD taxonomy, v26)
  val catePath = s"s3a://o2o-dimension-table/category_table/jd/jd_sub_v26/*"

  // Address dimension table path for this platform/month
  //  val addressPath = s"s3a://o2o-dimension-table/address_table/address_table_2020/11/address_platform/dingdongmc_address_2020_11/"
  val addressPath = s"s3a://o2o-dimension-table/address_table/address_table_${year}/${month}/address_platform/${platform}_address_${year}_${month}/"

  // Output path for the cleaned result (ORC)
  val resultPath = s"s3a://o2o-dataproces-group/zyf/${year}/${month}/${platform}/good/"

  /**
    * Monthly ETL entry point: reads crawl snapshots, derives per-product
    * price / sales volume / revenue, adds fixed platform label columns,
    * joins the category and address dimension tables, and writes ORC to
    * `resultPath`.
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .master("local[*]") // NOTE(review): master hard-coded to local — presumably overridden for cluster runs; confirm
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3-compatible) credentials for source and dimension data
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")


    // Read the MongoDB-crawled source data (ORC); sales volume, revenue and
    // discount are computed below.
    val dingdongmc = spark.read.orc(sourcePath)
    // NOTE(review): registerTempTable is deprecated since Spark 2.0 in favor
    // of createOrReplaceTempView; left unchanged in this block.
    dingdongmc.registerTempTable("dingdongmc")
    // Average price per product, taken from the LAST add_to_field snapshot of each row.
    val sql =
      """
        |select product_id, round(avg(price) ,2)priceText from
        |(select good_id,add_to_field[size(add_to_field)-1].priceText as price,product_id from dingdongmc)
        |group by product_id
        |""".stripMargin
    val res_sql = spark.sql(sql)
    res_sql.registerTempTable("restabl")

    // Attach the averaged price back onto every source row.
    val join_price = spark.sql(
      """
        |select a.*,b.priceText from
        |dingdongmc a left join restabl b on a.product_id = b.product_id
        |""".stripMargin)


    // ---- Compute the sales volume for each row from its cumulative
    // totalSell snapshots (add_to_field) ----
    val caculateRDD: RDD[String] = join_price.toJSON.rdd.map(lines => {
      val nObject: JSONObject = JSON.parseObject(lines)
      // NOTE(review): Base_Info is extracted but never used below.
      val Base_Info = nObject.getOrDefault("Base_Info", "-1").toString
      val add = nObject.getJSONArray("add_to_field")
      val addObject = nObject.getJSONArray("add_to_field").toArray()
      // Keep only snapshots with a positive cumulative sales figure, ordered
      // chronologically by numeric crawl_date.
      val array_sort = addObject.filter(x => {
        JSON.parseObject(x.toString).get("totalSell").toString.toInt > 0
      }).sortBy(x => {
        val crawl_date = JSON.parseObject(x.toString).get("crawl_date")
        Integer.parseInt(crawl_date.toString)
      })
      var lastSell = 0
      var midSell = 0
      var sellCount = 0
      if (array_sort.size > 1) {
        var headSell = JSON.parseObject(array_sort(0).toString()).get("totalSell").toString.toInt
        //      println("headsell" + headSell)
        // Advance to the first non-zero cumulative sales figure.
        // NOTE(review): entries with totalSell <= 0 were already filtered out
        // above, so this loop appears unreachable — confirm before removing.
        var i = 1
        //      println(11)
        while (i < (array_sort.length - 1) && headSell <= 0) {
          //        println(22)
          //        println("i = "+i)
          headSell = JSON.parseObject(array_sort(i).toString()).get("totalSell").toString.toInt
          //        println("headsell" + headSell)
          i += 1
        }
        if (headSell != 0) {
          // Scan from the second snapshot onward, remembering the last
          // plausible cumulative value as `midSell`.
          // NOTE(review): the bound `length - 1` means the FINAL snapshot is
          // never inspected — confirm whether this off-by-one is intentional.
          var i = 1
          while (i < (array_sort.length - 1)) {
            lastSell = JSON.parseObject(array_sort(i).toString()).get("totalSell").toString.toInt
            // Accept only growth versus the baseline with a jump below 10000
            // (outlier guard). The `> 0` clause is redundant with
            // `lastSell > headSell`.
            if (lastSell > headSell && (lastSell - headSell < 10000) && (lastSell - headSell > 0)) {
              midSell = lastSell
            }
            i += 1
          }
          // If no snapshot qualified, midSell stays 0 and the delta is
          // negative; it is clamped to 0 further below.
          sellCount = midSell - headSell
        } else {
          sellCount = 0
        }
      } else {
        sellCount = 0
      }
      //      println(sellCount)
      //      val lastSellCount = add.getJSONObject(add.size() - 1).getInteger("totalSell")
      val lastpriceText: String = nObject.getString("priceText")

      //      val firstSellCount = add.getJSONObject(0).getInteger("totalSell")

      // Take the first coupon of the latest snapshot; "-1" means no coupon.
      // NOTE(review): getString("coupon") would NPE if the field is absent —
      // presumably every snapshot carries it; confirm against the crawler.
      var coupon = "-1"
      if (!add.getJSONObject(add.size() - 1).getString("coupon").equals("-1")) {
        coupon = add.getJSONObject(add.size() - 1).getJSONArray("coupon").get(0).toString
      }

      //      var sellCount = ((lastSellCount - firstSellCount)*30/27).toInt
      //      var sellCount  = lastSellCount - firstSellCount


      // Clamp negative deltas (e.g. when no qualifying snapshot was found).
      if (sellCount < 0) {
        sellCount = 0
      }
      // Smoothing factor derived from comparison with historical sales
      // (currently disabled).
      //      sellCount = (sellCount * 0.52).toInt
      val salesAmount = (sellCount * lastpriceText.toDouble).formatted("%.2f")

      nObject.put("sellCount", sellCount)
      nObject.put("salesAmount", salesAmount)
      nObject.put("priceText", lastpriceText)
      nObject.put("coupon", coupon)

      // Drop the raw snapshot array and the crawl-level address fields; the
      // cleaned address comes from the dimension-table join later.
      nObject.remove("add_to_field")
      nObject.remove("province")
      nObject.remove("city")
      nObject.remove("address")
      nObject.remove("longitude")
      nObject.remove("latitude")
      nObject.toString
    })
    //    spark.read.json(caculateRDD).show(5)
    // Keep only rows that actually sold, one row per product.
    val resultDF: DataFrame = spark.read.json(caculateRDD).where("sellCount > 0").dropDuplicates("product_id")

    resultDF.cache()
    resultDF.registerTempTable("resultDF")
    // Quick aggregate sanity check: row count / total volume / total revenue.
    val res_d = spark.sql(
      """
        |select count(1),sum(sellCount) sell,sum(salesAmount) from resultDF
        |""".stripMargin).show(false)




    // 1. Extract discounts (CSV export currently disabled)
    println("======提取折扣=======")
    //    resultDF.select("coupon").dropDuplicates().repartition(1).write.csv("D:\\ddmc\\coupon1\\")
    println("======打基础标签=======")
    // 2. Add fixed platform label columns
    import org.apache.spark.sql.functions._
    val frame: DataFrame = resultDF.withColumn("shopType", lit("B"))
      .withColumn("platformId", lit("65")).withColumn("platformName", lit("叮咚买菜"))
      .withColumn("timeStamp", lit(s"${timeStamp}"))
    println("======匹配分类=======")
    // 3. Match categories against the JD taxonomy
    val cateDF: DataFrame = spark.read.json(catePath)
    //    spark.read.json()
    val cateResultDF: DataFrame = cateMatch(spark, frame, cateDF)
    //.na.fill(Map("firstCategoryId" -> "10099","secondCategoryId" -> "1009999","thirdCategoryId" -> "100999999","fourthCategoryId" -> "10099999999"))

    // 4. Match addresses via the address dimension table
    println("======匹配地址=======")
    val addressDF: DataFrame = spark.read.json(addressPath)
    val addressResultDF: DataFrame = addressMatch(spark, cateResultDF, addressDF)
    addressResultDF.write.orc(resultPath)

    //    addressResultDF.registerTempTable("addtab")

    //    spark.sql(
    //      """
    //        |select
    //        |firstCategoryId
    //        |from
    //        |addtab
    //        |group by firstCategoryId
    //      """.stripMargin).show()

    // 5. Join brand info — remember to enable this for the real run
    //    val brand = new brand_join_res
    //    brand.brandJoinResult(addressResultDF, resultPath, Iargs.YEAR.toInt, Iargs.MONTH.toInt, platform, spark)

    //    addressResultDF.repartition(2).write.json(resultPath)
    // After each monthly run, keep a copy under g-data: obs://dws-data/g_data/2021/7/
    //spark.read.json(brand)
    // 5. Validate results (disabled)
    /*println("======品牌=======")

//    val brandResultDF = cateResultDF.withColumnRenamed("platformName","platform").select("brandName").select("brandValueId")
//      .select("firstCategoryId").select("platform")

    cateResultDF.registerTempTable("catet")

    val brandDF = spark.sql(
      """
        |select
        |brandName,
        |brandValueId,
        |firstCategoryId,
        |platformName as platform
        |from
        |catet
      """.stripMargin).dropDuplicates()

    brandDF.repartition(1).write.json("D:\\ddmc\\brand\\")


    //6.计算销量

    spark.sql(
      """
        |select
        |count(1),
        |sum(sellCount),
        |sum(salesAmount)
        |from
        |catet
      """.stripMargin).show()*/
  }


  /**
    * Left-joins category-tagged product rows with the store address dimension
    * table, replacing nulls in unmatched address columns with the sentinel
    * "-1".
    *
    * The source column `station_id` is renamed to `shopId` to match the join
    * key of the address table.
    *
    * @param spark        active SparkSession
    * @param cateResultDF product rows already tagged with category ids; must
    *                     carry a `station_id` column
    * @param addressDF    address dimension rows keyed by `shopId`
    * @return cateResultDF columns plus the address-dimension columns
    */
  def addressMatch(spark: SparkSession, cateResultDF: DataFrame, addressDF: DataFrame): DataFrame = {
    // createOrReplaceTempView replaces registerTempTable (deprecated since Spark 2.0)
    cateResultDF.withColumnRenamed("station_id", "shopId").createOrReplaceTempView("sourcetab")
    addressDF.createOrReplaceTempView("addresstab")

    val addressResultDF = spark.sql(
      """
        |select
        |a.*,
        |CASE when b.district_origin is null then '-1' else b.district_origin end district_origin,
        |CASE when b.town is null then '-1' else b.town end town,
        |CASE when b.address is null then '-1' else b.address end address,
        |CASE when b.name is null then '-1' else b.name end name,
        |CASE when b.latitude is null then '-1' else b.latitude end latitude,
        |CASE when b.longitude is null then '-1' else b.longitude end longitude,
        |CASE when b.registration_institution is null then '-1' else b.registration_institution end registration_institution,
        |CASE when b.administrative_region is null then '-1' else b.administrative_region end administrative_region,
        |CASE when b.city is null then '-1' else b.city end city,
        |CASE when b.city_grade is null then '-1' else b.city_grade end city_grade,
        |CASE when b.city_origin is null then '-1' else b.city_origin end city_origin,
        |CASE when b.economic_division is null then '-1' else b.economic_division end economic_division,
        |CASE when b.if_city is null then '-1' else b.if_city end if_city,
        |CASE when b.if_district is null then '-1' else b.if_district end if_district,
        |CASE when b.if_state_level_new_areas is null then '-1' else b.if_state_level_new_areas end if_state_level_new_areas,
        |CASE when b.poor_counties is null then '-1' else b.poor_counties end poor_counties,
        |CASE when b.province is null then '-1' else b.province end province,
        |CASE when b.rural_demonstration_counties is null then '-1' else b.rural_demonstration_counties end rural_demonstration_counties,
        |CASE when b.rural_ecommerce is null then '-1' else b.rural_ecommerce end rural_ecommerce,
        |CASE when b.the_belt_and_road_city is null then '-1' else b.the_belt_and_road_city end the_belt_and_road_city,
        |CASE when b.the_belt_and_road_province is null then '-1' else b.the_belt_and_road_province end the_belt_and_road_province,
        |CASE when b.the_yangtze_river_economic_zone_city is null then '-1' else b.the_yangtze_river_economic_zone_city end the_yangtze_river_economic_zone_city,
        |CASE when b.the_yangtze_river_economic_zone_province is null then '-1' else b.the_yangtze_river_economic_zone_province end the_yangtze_river_economic_zone_province,
        |CASE when b.urban_agglomerations is null then '-1' else b.urban_agglomerations end urban_agglomerations,
        |CASE when b.aedzId is null then '-1' else b.aedzId end aedzId,
        |CASE when b.regional_ID is null then '-1' else b.regional_ID end regional_ID,
        |CASE when b.district is null then '-1' else b.district end district
        |from
        |sourcetab a
        |left join
        |addresstab b
        |on a.shopId = b.shopId
      """.stripMargin)

    // Cached: scanned once by the check below and again by the caller's write.
    addressResultDF.cache()

    // Sanity check: rows that failed to match an address.
    // BUGFIX: the original filtered `province is null`, which the CASE
    // fallback above makes impossible (nulls become '-1'), so the check never
    // showed anything. Filter on the sentinel instead.
    addressResultDF.createOrReplaceTempView("addresult")
    println("===没打上地址的数据===")
    spark.sql(
      """
        |select
        |*
        |from
        |addresult
        |where province = '-1'
      """.stripMargin).show()

    addressResultDF
  }

  /**
    * Left-joins source rows with the category dimension table on
    * `subCategoryId`, filling unmatched rows with the "other" fallback ids
    * (10099 / 1009999 / 100999999 / 10099999999).
    *
    * @param spark    active SparkSession
    * @param sourceDF product rows carrying a `subCategoryId` column
    * @param cateDF   category dimension table (JD taxonomy)
    * @return sourceDF columns plus the four category-id columns
    */
  def cateMatch(spark: SparkSession, sourceDF: DataFrame, cateDF: DataFrame): DataFrame = {
    // createOrReplaceTempView replaces registerTempTable (deprecated since Spark 2.0)
    cateDF.createOrReplaceTempView("catetab")
    sourceDF.createOrReplaceTempView("sourcetab")

    val cateResultDF = spark.sql(
      """
        |select
        |a.*,
        |case when b.firstCategoryId is null then 10099 else b.firstCategoryId end firstCategoryId,
        |case when b.secondCategoryId is null then 1009999 else b.secondCategoryId end secondCategoryId,
        |case when b.thirdCategoryId is null then 100999999 else b.thirdCategoryId end thirdCategoryId,
        |case when b.fourthCategoryId is null then 10099999999 else b.fourthCategoryId end fourthCategoryId
        |from
        |sourcetab a
        |left join
        |catetab b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin)

    // Cached: scanned once by the check below and again by the caller.
    cateResultDF.cache()

    // Sanity check: count rows that did NOT match a real category.
    // BUGFIX: the original filtered `firstCategoryId is null`, which the CASE
    // fallback above makes impossible — the count was always 0. Count rows
    // that fell through to the fallback id instead.
    cateResultDF.createOrReplaceTempView("cateresult")
    println("===没有打上分类的数据===")
    val r = spark.sql(
      """
        |select
        |*
        |from
        |cateresult
        |where firstCategoryId = 10099
      """.stripMargin)

    //    r.repartition(1).write.json("D:\\ddmc\\catea\\")

    println(r.count())

    cateResultDF
  }

}

