package com.o2o.cleaning.month.platform.ebusiness_plat.yangmatou

import com.o2o.utils.Iargs
import org.apache.spark.sql._
/**
  * @ Author: o2o-rd-0008
  * @ Date:   2020/6/5 16:23
  * @ Param:  ${PARAM}
  * @ Description: Ad-hoc validation checks of the monthly MTTG "good" dataset stored on OBS.
  */
object CheckMTTGObsData {

  /**
    * Ad-hoc data-quality checks over the monthly "good" dataset of the
    * Meituan-Tuangou (mttg) platform, stored on OBS and read through the
    * Hadoop S3A connector. Prints the distinct crawl timestamps, overall
    * counts and sums, the number of rows with a placeholder province, and
    * per-category aggregates.
    *
    * Usage: CheckMTTGObsData [year] [month] [platform]
    * All three arguments are optional; missing ones fall back to the
    * historical hard-coded values (2020 / 7 / mttg), so existing
    * invocations keep their exact previous behaviour.
    */
  def main(args: Array[String]): Unit = {

    // CLI overrides with backward-compatible defaults (previously hard-coded vars).
    val year     = if (args.length > 0) args(0) else "2020"
    val month    = if (args.length > 1) args(1) else "7"
    val platform = if (args.length > 2) args(2) else "mttg"

    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // NOTE(review): local master is hard-coded; remove this when submitting
      // to a cluster so spark-submit's --master takes effect.
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS credentials/endpoint for the S3A filesystem come from project config.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    // Last validated "good" data of the month under inspection (ORC).
    // Previously hard-coded to 2020/7 even though year/month vars existed;
    // now derived from them (identical path under the defaults).
    // NOTE(review): the directory component is "meituan_tg", not ${platform};
    // confirm the bucket naming scheme before reusing this job for other platforms.
    val lastgoodPath = s"s3a://dws-data/g_data/${year}/${month}/meituan_tg/"

    // registerTempTable is deprecated since Spark 2.0 (removed in 3.x);
    // createOrReplaceTempView is the drop-in replacement.
    spark.read.orc(lastgoodPath).createOrReplaceTempView("taball")

    // Distinct crawl timestamps present in the dataset (should normally be one).
    spark.sql(
      """
        |select
        |timeStamp
        |from
        |taball
        |group by timeStamp
      """.stripMargin).show()

    println("本月") // "this month": overall volume of the dataset
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |count(distinct shopId),
        |sum(salesAmount),
        |sum(sellCount)
        |from
        |taball
      """.stripMargin).show()

    println("省为0的条数") // rows whose province field is the '0' placeholder
    spark.sql(
      """
        |
        |select
        |count(1) ct
        |from
        |taball
        |where province='0'
      """.stripMargin).show()

    println("10028美食分类的量") // volume of the food category (firstCategoryId 10028)
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |count(distinct shopId),
        |sum(salesAmount),
        |sum(sellCount)
        |from
        |taball
        |where firstCategoryId='10028'
      """.stripMargin).show()

    println("所有分类的量") // volume broken down by every first-level category
    spark.sql(
      """
        |
        |select
        |count(1) ct,
        |sum(salesAmount),
        |sum(sellCount),
        |firstCategoryId
        |from
        |taball
        |group by firstCategoryId
        |order by ct desc
      """.stripMargin).show()

    // NOTE(review): several hundred lines of commented-out one-off experiments
    // (good/meishi dataset merges, month-over-month sell-count diffs, top-shop
    // rankings) and the unused path/index locals they referenced were removed
    // here; recover them from version control history if needed.

    // spark.stop() also stops the underlying SparkContext.
    spark.stop()
  }
}
