import com.alibaba.fastjson.{JSON, JSONObject}
import com.o2o.utils.Iargs
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession


/**
  * @author o2o-rd-0008
  * @date   2020/6/5 16:23
  * @description Reads Meituan-Tuangou (mttg) goods documents from Elasticsearch,
  *              inflates sellCount, recomputes salesAmount, and writes them back.
  */
object CheckMTTGObsData {

  /**
    * Reads goods documents in first-level category 10028 (food / 美食) from the
    * Elasticsearch index `247_2020_mttg/mttg_2020_8`, inflates each document's
    * `sellCount` by 15%, recomputes the derived `salesAmount`, and writes the
    * documents back to the same index keyed by `good_id`.
    *
    * NOTE(review): this file previously carried ~500 lines of commented-out,
    * one-off exploratory queries plus many unused path variables; they were
    * removed here and can be recovered from version control if needed.
    *
    * @param args unused command-line arguments
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      // SECURITY(review): hard-coded ES credentials in source; move these to
      // configuration / environment variables.
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3A-compatible) credentials come from the shared Iargs config object.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    // Target ES index/type: Meituan-Tuangou goods, August 2020.
    val index6 = "247_2020_mttg/mttg_2020_8"

    import org.elasticsearch.spark._
    // Pull only documents whose firstCategoryId is 10028 (food category).
    val data6: RDD[String] = sc.esJsonRDD(index6,
      """
        |{
        |  "query": {
        |    "match_phrase": {
        |      "firstCategoryId": "10028"
        |    }
        |  }
        |}
      """.stripMargin).values

    spark.read.json(data6).toJSON.rdd.map { lines =>
      val nObject: JSONObject = JSON.parseObject(lines)

      // fastjson's getDouble/getInteger return null when the key is absent or
      // non-numeric; the previous code auto-unboxed them and would NPE on any
      // malformed document. Default missing values to 0 instead.
      val priceText: Double =
        Option(nObject.getDouble("priceText")).map(_.doubleValue()).getOrElse(0.0)
      val sellCount: Int =
        Option(nObject.getInteger("sellCount")).map(_.intValue()).getOrElse(0)

      // Inflate sales volume by 15% and recompute the derived sales amount.
      val sellCountNew: Int = (sellCount * 1.15).toInt
      val salesAmount: Double = priceText * sellCountNew

      nObject.put("sellCount", sellCountNew)
      nObject.put("salesAmount", salesAmount)
      nObject
    }.saveToEs(index6, Map("es.mapping.id" -> "good_id"))

    sc.stop()
  }
}
