package com.o2o.cleaning.month.platform.ebusiness_plat.meituan

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.sql._

/**
  * Cleaning / processing of Meituan (waimai) crawl data.
  *
  * Description: 2019
  *
  */

object Meituan_wm {

  // Timestamp tag attached to every cleaned record (see handleMeituanData).
  val timeStamp = Meituan_config.timeStamp


  // Path of the raw crawled Meituan detail data.
  val detail_sourcePath = Meituan_config.detail_sourcePath

  // Path of the merged data: crawled Meituan address data joined with the
  // addresses taken from the Meituan detail data.
  val resultAddress = Meituan_config.resultAddress

  // Path of the Meituan category mapping table.
  val category_path = Meituan_config.category_path

  // Output locations:
  // path where the cleaned product result is written.
  val resultPath = Meituan_config.resultPath


  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      //.master("local[*]")
      .getOrCreate()
    val sc = spark.sparkContext
    // SECURITY FIX: the S3 access/secret keys were committed in plain text.
    // They are now read from the environment when available; the original
    // literals remain only as fallbacks so existing deployments keep working.
    // These keys are exposed in VCS history — rotate them and drop the fallbacks.
    sc.hadoopConfiguration.set("fs.s3a.access.key",
      sys.env.getOrElse("S3A_ACCESS_KEY", "GAO7EO9FWKPJ8WFCQDME"))
    sc.hadoopConfiguration.set("fs.s3a.secret.key",
      sys.env.getOrElse("S3A_SECRET_KEY", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL"))
    sc.hadoopConfiguration.set("fs.s3a.endpoint",
      sys.env.getOrElse("S3A_ENDPOINT", "https://obs.cn-north-1.myhuaweicloud.com"))
    sc.setLogLevel("WARN")

    // Step 1: extract address data and newly added categories from the detail data.
    SourceShopAddress.extract_Address_newAddCategory(spark, detail_sourcePath)

    // Step 2: clean / standardise the addresses.
    Address_Gov_Store_2019_Meituan.meituan_handleAddress(spark, Meituan_config.platform_Name)

    // Step 3: clean the Meituan product data.
    val data_good: DataFrame = handleMeituanData(spark)

    // --- ad-hoc verification of an already-written result set ----------------
    // NOTE(review): the code that wrote `data_good` to Meituan_config.resultPath
    // is disabled; this run only re-reads a previous output and prints sanity
    // totals. Confirm that is still intentional before re-running the pipeline.

    // Renamed from `resultPath`: the original local val shadowed the
    // object-level `resultPath` with a different, hard-coded location.
    val verifyResultPath = "s3a://o2o-dataproces-group/xuechunhua/product/meituan/2020/10/finalResultData_new/"
    val frame = spark.read.orc(verifyResultPath)
    import org.apache.spark.sql.functions._
    frame.agg(count("*"), sum("sellCount"),
      sum("salesAmount")).show(false)
    frame.selectExpr("sum(sellCount)", "sum(salesAmount)", "count(good_id)").show(false)
    // BUG FIX: `sum(distinct shopId)` added the shop ids together, which is
    // meaningless for a shop count; use count(distinct ...) as the alias implies.
    frame.selectExpr("count(distinct shopId) countShop", "sum(sellCount)sellCount",
      "sum(salesAmount)salesAmount").show(false)

    // Data of the same month last year (input of the disabled YoY step below).
    val lastMonthData: DataFrame = spark.read.orc("s3a://dws-data/g_data/2019/10/meituan/")
    // Result path: province year-over-year share.
    val provinceyoy = "s3a://o2o-dataproces-group/xuechunhua/product/meituan/2020/10/tuisuan/provincehuanbi_2"
    // Result path: four-big-regions year-over-year share.
    val economic_division_yoyzhanbi = "s3a://o2o-dataproces-group/xuechunhua/product/meituan/2020/10/tuisuan/economic_division_huanbi_2"
//    MeituanTuisuan.jisuanyoyandzhabi(spark, lastMonthData, frame, provinceyoy, economic_division_yoyzhanbi)
    sc.stop()
  }


  /**
    * Cleans the raw Meituan product data: normalises numeric fields, stamps
    * platform metadata, joins in the category mapping and the cleaned address
    * data, and returns one deduplicated row per `good_id`.
    *
    * Side effects: registers the temp views `meituan_basic_table`,
    * `meituan_addr` and `meituan_cate`.
    *
    * @param spark active session used for all reads and SQL
    * @return the cleaned product DataFrame (`data_good`)
    */
  def handleMeituanData(spark: SparkSession): DataFrame = {

    // NOTE(review): reads a hard-coded path instead of the configured
    // `detail_sourcePath` — presumably a one-off backfill for 2020/10; confirm.
    val df = spark.read.orc("s3a://o2o-dataproces-group/xuechunhua/product/meituan/2020/10/meituan_11_5_bu/").toJSON.rdd

    // Pass 1: normalise numeric fields and stamp platform metadata.
    val value = df.map(line => {
      val lines = JSON.parseObject(line)
      // Missing values default to "-1"; price and sales amount are rounded to
      // 2 decimal places.
      val sellCount = lines.getOrDefault("sellCount", "-1").toString.toLong
      val priceText = lines.getOrDefault("priceText", "-1").toString.toDouble.formatted("%.2f").toDouble
      lines.put("priceText", priceText)
      lines.put("sellCount", sellCount)
      lines.put("salesAmount", (priceText * sellCount).formatted("%.2f").toDouble)
      lines.put("praiseNum", lines.getOrDefault("praiseNum", "-1").toString)

      // Re-attach "evaluates" as a JSON object; a missing or "-1" value becomes
      // the {"fuyi":"-1"} placeholder. (Replaces the old ev/es/str branching,
      // which was equivalent but opaque and left `es` unused; using getOrDefault
      // also fixes the NPE `lines.get("evaluates").toString` threw when the key
      // was absent.)
      val evaluates = lines.getOrDefault("evaluates", "-1").toString
      val evaluatesObj =
        if (evaluates == "-1") JSON.parseObject("{\"fuyi\":\"-1\"}")
        else JSON.parseObject(evaluates)
      lines.put("evaluates", evaluatesObj)

      lines.put("timeStamp", s"${timeStamp}")
      lines.put("platformName", "美团外卖")
      lines.put("platformId", "16")
      lines.put("platformName_spelling", "meituan")

      lines.toString
    })

    // Keep only sellable rows, and drop location/category columns that are
    // re-joined from the address and category tables below.
    val dataDF = spark.read.json(value).filter("sellCount > '0' and priceText > '0'")
      .drop("city", "province", "regional_ID", "regional_id", "latitude", "longitude", "address",
        "firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId")

    dataDF.createOrReplaceTempView("meituan_basic_table")

    /**
      * join meituan_category / address_mapping
      */
    // NOTE(review): hard-coded path overrides the configured `resultAddress`
    // (see the disabled line below) — confirm this is intentional.
    spark.read.json("s3a://o2o-dataproces-group/xuechunhua/product/meituan/2020/10/resultAddress_11_5_bu/").withColumnRenamed("shopId", "shopId_addr")
//    spark.read.json(resultAddress).withColumnRenamed("shopId", "shopId_addr")
      .createOrReplaceTempView("meituan_addr")
    val cateDF = spark.read.json(category_path)

    // Columns: firstCategoryId secondCategoryId thirdCategoryId fourthCategoryId food_type
    cateDF.createOrReplaceTempView("meituan_cate")

    // Left-join category and address data; unmatched categories fall back to
    // the 10099* "other" codes. One row per good_id after dedup.
    val cate_addr_data_DF = spark.sql(
      """
        |select
        |a.*,
        |d.*,
        |IFNULL(b.firstCategoryId,'10099') firstCategoryId,
        |IFNULL(b.secondCategoryId,'1009999') secondCategoryId,
        |IFNULL(b.thirdCategoryId,'100999999') thirdCategoryId,
        |IFNULL(b.fourthCategoryId,'10099999999') fourthCategoryId,
        |b.food_type food_type
        |from
        |meituan_basic_table a
        |left join
        |meituan_cate b
        |on
        |a.categoryId=b.categoryId
        |left join
        |meituan_addr d
        |on a.shopId = d.shopId_addr
      """.stripMargin)
      .drop("need_check", "platformName_spelling", "md5_id", "pre_md5", "crawl_date", "shop_pre_md5", "md5_shopId", "shopId_addr")
      .dropDuplicates("good_id")

    // Pass 2: coerce optional fields to plain strings so every record shares a
    // stable schema when the RDD is re-read as JSON.
    val source_value_rdd = cate_addr_data_DF.toJSON.rdd.map(line => {
      val nObject: JSONObject = JSON.parseObject(line)

      // All of these default to "-1" when absent, except "address" ("0").
      // (The original put "phone" and "evaluates" twice; once is enough.)
      val stringDefaulted = Seq(
        "evaluates", "shopSellCount", "phone", "street", "shopImages", "dpShopId",
        "mtWmPoiId", "is_premium", "flavors", "emotionalKeywords", "opening_hours",
        "order_lead_time", "promotion_info", "longitude", "latitude", "shopCommentCount")
      stringDefaulted.foreach(k => nObject.put(k, nObject.getOrDefault(k, "-1").toString))
      nObject.put("address", nObject.getOrDefault("address", "0").toString)

      // Any value containing a dash (placeholder or negative) means "no rating".
      var goodRatePercentage = nObject.getOrDefault("goodRatePercentage", "-1").toString
      if (goodRatePercentage.contains("-")) goodRatePercentage = "-1"
      nObject.put("goodRatePercentage", goodRatePercentage)

      nObject.toString
    })

    val data_good: DataFrame = spark.read.json(source_value_rdd)

    data_good
  }


}
