package com.o2o.cleaning.month.platform.ebusiness_plat.meituan_tg

import com.alibaba.fastjson.{JSON, JSONObject}
import com.o2o.utils.Iargs
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2020/6/5 16:23
  * @ Param:  ${PARAM}
  * @ Description: Checks/adjusts Meituan-Tuangou ("mttg") OBS data: merges May and
  *                June 2020 ES indices, rescales sales figures, writes ORC to OBS.
  */
object CheckMTTGObsData {
  /**
    * Entry point. Pipeline overview:
    *  1. Read May ("2020_mttg_5") and June ("2020_mttg_6") product documents from Elasticsearch.
    *  2. Drop rows from three Beijing districts; normalize the `evaluates` and `loc_info`
    *     fields so each always holds a JSON-object string (placeholder {"fuyi":"-1"} when empty/-1).
    *  3. Scale May `sellCount`/`salesAmount` by a per-record random coefficient
    *     (~0.20-0.28, or ~1.00-1.08 for firstCategoryId 10028).
    *  4. Union both months, dedupe by good_id, keep all of category 10028 plus the
    *     top 55,000 non-10028 shops by total salesAmount (those rescaled again by 0.53).
    *  5. Stamp a fixed timeStamp, write ORC to OBS (s3a), and print check aggregations.
    *
    * NOTE(review): output is non-deterministic due to the per-record Random coefficient.
    */
  def main(args: Array[String]): Unit = {

    // Build the SparkSession; Kryo serialization and case-sensitive SQL are enabled.
    // The commented ES connection settings suggest ES config is supplied externally
    // (spark-submit conf) in the production run.
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
//      .config("es.nodes", "192.168.2.247")
//      .config("es.port", "9200")
//      .config("cluster.name","O2OElastic")
//      .config("es.net.http.auth.user", "elastic")
//      .config("es.net.http.auth.pass", "changeme")
//      .master("local[*]")
      .getOrCreate()

    // Configure S3A (Huawei OBS) credentials for the write at the end of the job.
    val sc = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
//    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")
    // NOTE(review): year/month/platform are only referenced by the commented-out
    // path-building code below; they are dead if that code stays disabled.
    var year ="2020"
    var month = "6"
    var platform = "mttg"

    // Elasticsearch index names for May and June 2020 mttg data.
    val index5 = "2020_mttg_5"
    val index6 = "2020_mttg_6"

    // esJsonRDD yields (docId, jsonString) pairs; we keep only the JSON payloads.
    import org.elasticsearch.spark._
    val data5: RDD[String] = sc.esJsonRDD(index5).values
    val data6: RDD[String] = sc.esJsonRDD(index6).values
//    spark.read.json(data5).printSchema()
//    spark.read.json(data6).printSchema()


    // "good" data paths (disabled exploratory code)
/*    var goodPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good"
//    var goodmeishiPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/meishi1/good"
//    var goodAllPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_all"
//    var goodtestPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_test"
//    var newAddPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/newAdd"
    var lastgoodPath = s"s3a://dws-data/g_data/2020/5/meituan_tg/"
    spark.read.orc(goodPath).printSchema()
    spark.read.orc(lastgoodPath).printSchema()*/

    // June data: dedupe, drop three districts, then rewrite each record so that
    // `evaluates` and `loc_info` always contain a JSON-object string.
    // If the field is "-1" or empty it becomes {"fuyi":"-1"}; otherwise it is
    // re-parsed and re-serialized (normalizing its formatting).
    // NOTE(review): nObject.get("evaluates") would NPE if the key is absent —
    // assumes every ES document carries both fields; confirm upstream schema.
    val sixmonth: RDD[String]  = spark.read.json(data6).dropDuplicates().where("district != '大兴区'")
      .where("district != '房山区'").where("district != '丰台区'").toJSON.rdd.map(lines=>{
      val nObject: JSONObject = JSON.parseObject(lines)
      var ev = "-1"
      var evaluates = nObject.get("evaluates").toString
      var str = new JSONObject
      if (evaluates.equals("-1")||evaluates.equals("")) {
        ev = "{\"fuyi\":\"-1\"}"
      } else {
        str = JSON.parseObject(evaluates)
      }
      // ev contains "fuyi" only when evaluates was -1/empty above.
      if (!ev.contains("fuyi")) {
        nObject.put("evaluates", str.toString)
      } else {
        val evs = JSON.parseObject(ev)
        nObject.put("evaluates", evs.toString)
      }

      // Same normalization for loc_info.
      var li = "-1"
      var str1 = new JSONObject
      var loc_info = nObject.getString("loc_info")
      if (loc_info.equals("-1")||loc_info.equals("")) {
        li = "{\"fuyi\":\"-1\"}"
      } else {
        str1 = JSON.parseObject(loc_info)
      }
      if (!li.contains("fuyi")) {
        nObject.put("loc_info", str1.toString)
      } else {
        val evs = JSON.parseObject(li)
        nObject.put("loc_info", evs.toString)
      }
      nObject.toString
    })

    val sixDF: DataFrame = spark.read.json(sixmonth)




      //.registerTempTable("tab")

    // May data: same district filter and evaluates/loc_info normalization as June,
    // plus: scale sellCount and salesAmount by a random per-record coefficient
    // (0.20..0.28, or 1.00..1.08 for category 10028), and drop backup/aux columns.
    val fivemonth: RDD[String] = spark.read.json(data5).dropDuplicates().where("district != '大兴区'")
      .where("district != '房山区'").where("district != '丰台区'")
      .toJSON.rdd.map(lines => {
      val nObject: JSONObject = JSON.parseObject(lines)

      // randomnum in [0,8]; coefficient = randomnum/100 + base (0.2 or 1.0).
      val randomnum = scala.util.Random.nextInt(9)
      var num = randomnum / 100.0 + 0.2

      val firstCategoryId: String = nObject.getString("firstCategoryId")

      // Category 10028 (food) gets a ~1.0x coefficient instead of ~0.2x.
      if (firstCategoryId.equals("10028"))
        num = randomnum / 100.0 + 1.0


      val sellCount = nObject.getOrDefault("sellCount", "0").toString
      val salesAmount = nObject.getOrDefault("salesAmount", "0").toString

      // NOTE(review): sellCount.toInt / salesAmount.toDouble throw on non-numeric
      // values — assumes these fields are always numeric strings; confirm upstream.
      val sellCountnew = (sellCount.toInt * num).toInt
      val salesAmountnew = (salesAmount.toDouble * num).formatted("%.2f")
      nObject.put("sellCount", sellCountnew)
      nObject.put("salesAmount", salesAmountnew)

      // Normalize evaluates (same pattern as the June branch above).
      var ev = "-1"
      var evaluates = nObject.get("evaluates").toString
      var str = new JSONObject
      if (evaluates.equals("-1")||evaluates.equals("")) {
        ev = "{\"fuyi\":\"-1\"}"
      } else {
        str = JSON.parseObject(evaluates)
      }
      if (!ev.contains("fuyi")) {
        nObject.put("evaluates", str.toString)
      } else {
        val evs = JSON.parseObject(ev)
        nObject.put("evaluates", evs.toString)
      }

      // Normalize loc_info.
      var li = "-1"
      var str1 = new JSONObject
      var loc_info = nObject.getString("loc_info")
      if (loc_info.equals("-1")||loc_info.equals("")) {
        li = "{\"fuyi\":\"-1\"}"
      } else {
        str1 = JSON.parseObject(loc_info)
      }
      if (!li.contains("fuyi")) {
        nObject.put("loc_info", str1.toString)
      } else {
        val evs = JSON.parseObject(li)
        nObject.put("loc_info", evs.toString)
      }

      // Drop backup/derived columns so May and June schemas line up for the union.
      nObject.remove("salesAmount_bak")
      nObject.remove("sellCount180")
      nObject.remove("sellCount90")
      nObject.remove("sellCountText")
      nObject.remove("sellCount_bak")
      nObject.toString
    })
    val fiveDF: DataFrame = spark.read.json(fivemonth)
    fiveDF.printSchema()
     //.registerTempTable("lasttab")


    // May data multiplied by the coefficient + June newly-added data
//347810
   /* val newSixMonth = spark.sql(
      """
        |
        |select
        |good_id,
        |sellCount,
        |salesAmount
        |from
        |tab
        |order by salesAmount
        |--limit 297810
      """.stripMargin)

    println(newSixMonth.count())

    val fivemonth = spark.sql(
      """
        |
        |select
        |good_id,
        |--cast(ceil(sellCount * 0.97) as bigint) as sells,
        |--sellCount*priceText * 0.97 as salesAmount
        |sellCount,
        |salesAmount
        |from
        |lasttab
        |order by salesAmount
        |--limit 710163
      """.stripMargin)//.withColumnRenamed("sells","sellCount")

    println(fivemonth.count())*/


    // Merge both months; June rows win only by dedupe order on good_id.
    // NOTE(review): registerTempTable is deprecated — createOrReplaceTempView is
    // the modern equivalent (left unchanged here to avoid touching behavior).
    val allData: Dataset[Row] = fiveDF.union(sixDF).dropDuplicates("good_id")
//    val allData: Dataset[Row] = fivemonth.union(sixmonth).dropDuplicates("good_id")
    allData.registerTempTable("all")


    // Adjust shop count and overall sales volume/amount by category (disabled variant)
    /*val meishiCateDF = spark.sql(
      """
        |
        |select
        |*
        |from
        |all
        |where firstCategoryId not in ('10031','10027')
        |
      """.stripMargin)


    val otherCateDF = spark.sql(
      """
        |select
        |*
        |from
        |(
        |select
        |*,
        |row_number() over(partition by firstCategoryId order by salesAmount desc) as rank
        |from
        |all
        |where firstCategoryId = '10031'
        |) where rank < 102613
        |
      """.stripMargin).drop("rank")

    val otherCateDF1 = spark.sql(
      """
        |select
        |*
        |from
        |(
        |select
        |*,
        |row_number() over(partition by firstCategoryId order by salesAmount desc) as rank
        |from
        |all
        |where firstCategoryId = '10027'
        |) where rank < 128014
        |
      """.stripMargin).drop("rank")*/
    // Adjust shop count and overall sales volume/amount by shop (active variant)

    // All food-category (10028) rows, kept unscaled in this stage.
    val meishiDF = spark.sql(
      """
        |
        |select
        |*
        |from
        |all
        |where firstCategoryId = '10028'
        |
      """.stripMargin)

    // Non-food rows restricted to the top 55,000 shops by total salesAmount.
    val otherShopDF = spark.sql(
      """
        |
        |select
        |*
        |from
        |all
        |where shopId in (
        |select
        |shopId
        |from
        |(
        |select
        |shopId,
        |sum(salesAmount) as salesAmount
        |from
        |all
        |where firstCategoryId != '10028'
        |group by shopId
        |)
        |order by salesAmount desc
        |limit 55000
        |)
        |
      """.stripMargin)


//    val value: Dataset[Row] = meishiCateDF.union(otherCateDF).union(otherCateDF1)
// Rescale the selected non-food rows: sellCount *= 0.53, and salesAmount is
// recomputed as the new sellCount times the unit price (priceText).
val value1: RDD[String] = otherShopDF.toJSON.rdd.map(lines => {
  val nObject: JSONObject = JSON.parseObject(lines)
  val sellCount = nObject.getOrDefault("sellCount", "0").toString
  val salesAmount = nObject.getOrDefault("salesAmount", "0").toString
  val priceText = nObject.getOrDefault("priceText", "0").toString

  val sellCountnew = (sellCount.toInt * 0.53).toInt
  val salesAmountnew = (sellCountnew * priceText.toDouble).formatted("%.2f")
  //      val salesAmountnew = (salesAmount.toDouble * 0.83).formatted("%.2f")
  nObject.put("sellCount", sellCountnew)
  nObject.put("salesAmount", salesAmountnew)

  nObject.toString
})
    val frame: DataFrame = spark.read.json(value1)

    // Final dataset: untouched food rows + rescaled non-food rows.
    val value: Dataset[Row] = meishiDF.union(frame)






    // Stamp every record with a fixed epoch timestamp.
    // NOTE(review): 1593446400 is presumably 2020-06-30 00:00:00 (UTC+8) — confirm.
    val value2: RDD[String] = value.toJSON.rdd.map(lines => {
      val nObject: JSONObject = JSON.parseObject(lines)
      nObject.put("timeStamp", "1593446400")
      nObject.toString
    })


    // Persist the adjusted June dataset to OBS as ORC.
    // NOTE(review): value2 is consumed twice (write + temp table) without caching,
    // so the whole lineage — including the random May coefficients — is recomputed;
    // the written data and the printed checks below may therefore differ.
    spark.read.json(value2).write.orc("s3a://o2o-dataproces-group/zsc/2020/6/mttg/good_new")

    spark.read.json(value2).registerTempTable("newalltab")
    println("总额")  // overall totals: row count, total sellCount, total salesAmount
    spark.sql(
      """
        |
        |select
        |count(1),
        |sum(sellCount),
        |sum(salesAmount)
        |from
        |newalltab
      """.stripMargin).show()

//   Query retail amount and sales volume under the food category
    println("美食总额")  // food-category (10028) totals
    spark.sql(
      """
        |
        |select
        |count(1),
        |sum(sellCount),
        |sum(salesAmount)
        |from
        |newalltab
        |where firstCategoryId='10028'
      """.stripMargin).show()
    println("总店铺数")  // distinct shop count, all categories
    spark.sql(
      """
        |
        |select
        |count(distinct shopId)
        |from
        |newalltab
      """.stripMargin).show()
    println("总美食店铺数")  // distinct shop count, food category only
    spark.sql(
      """
        |
        |select
        |count(distinct shopId)
        |from
        |newalltab
        |where firstCategoryId='10028'
      """.stripMargin).show()


    println("各个省份的销售额情况")  // per-province sales volume/amount
    spark.sql(
      """
        |
        |select
        |province,
        |sum(sellCount) as sellCount,
        |sum(salesAmount) as salesAmount
        |from
        |newalltab
        |group by province
        |order by salesAmount desc
      """.stripMargin).show(50)


    // NOTE(review): this block is an exact duplicate of the per-province query
    // above — almost certainly a copy-paste leftover; consider removing it.
    println("各个省份的销售额情况")
    spark.sql(
      """
        |
        |select
        |province,
        |sum(sellCount) as sellCount,
        |sum(salesAmount) as salesAmount
        |from
        |newalltab
        |group by province
        |order by salesAmount desc
      """.stripMargin).show(50)














    /*spark.read.orc(newAddPath).registerTempTable("meishitab")

    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(sellCount/10000) sellCount,
        |sum(sellCount*priceText/10000) salesAmount
        |from
        |meishitab
        |where sellCount < 100
      """.stripMargin).show()
*/

//    spark.read.orc(goodPath).printSchema()

   /* val value: RDD[String] = spark.read.orc(goodPath).toJSON.rdd.map(line => {
      val lines = JSON.parseObject(line)

      //evaluates为string  入库时转为object对象
      val ev = lines.getOrDefault("evaluates", "-1").toString
      lines.put("evaluates", ev)

      val li = lines.getOrDefault("loc_info", "-1").toString
      lines.put("loc_info", li)

      val flavors = lines.getOrDefault("flavors", "-1").toString
      lines.put("flavors", flavors)

      lines.remove("shop_pre_md5")
      lines.remove("md5_id")
      lines.remove("pre_md5")
      lines.remove("platformName_spelling")
      lines.remove("md5_shopId")
      lines.remove("type_xichen")
      lines.remove("gehu_type")
      lines.remove("promotion_info")
      lines.remove("is_pingou")

      lines.toString
    })
    spark.read.json(value).write.orc(goodtestPath)*/



    /*spark.read.orc(goodAllPath).registerTempTable("taball")
    spark.read.orc(lastgoodPath).registerTempTable("lasttab")

    println("本月店铺数量")
    spark.sql(
      """
        |
        |select
        |count(distinct shopId)
        |from
        |tab
      """.stripMargin).show()
    println("上月店铺数量")
    spark.sql(
      """
        |
        |select
        |count(distinct shopId)
        |from
        |lasttab
      """.stripMargin).show()

    println("本月All")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(salesAmount),
        |sum(sellCount)
        |from
        |taball
      """.stripMargin).show()
    println("本月")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(salesAmount),
        |sum(sellCount)
        |from
        |tab
      """.stripMargin).show()

    println("上月")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(salesAmount),
        |sum(sellCount)
        |from
        |lasttab
      """.stripMargin).show()
    println("省为0的条数")
    spark.sql(
      """
        |
        |select
        |count(1) ct
        |from
        |tab
        |where province='0'
      """.stripMargin).show()
    println("10028美食分类的量")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(salesAmount),
        |sum(sellCount)
        |from
        |tab
        |where firstCategoryId='10028'
      """.stripMargin).show()
    println("所有分类的量")
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(salesAmount),
        |sum(sellCount),
        |firstCategoryId
        |from
        |tab
        |group by firstCategoryId
        |order by ct desc
      """.stripMargin).show()*/
  // Release cluster resources.
  sc.stop()
}
}
