package com.o2o.cleaning.month.platform.ebusiness_plat.meituan_tg

import com.alibaba.fastjson.JSON
import com.mongodb.spark.MongoSpark
import com.o2o.utils.times.TimesYearAll
//import com.o2o.utils.obs.Obs_Conf
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * 步骤：1、提取补采信息给采集组
  *      2、合并两部分数据
  *      3、本月数据与上月数据关联  月销量  关联到的相减     关联不到的作为新增
  * @author: gaoyadi
  * @Date: 2018/6/21 10:20
  * @Description:执行时需要修改的变量 month  flag  timeStamp  collection  address
  *    第一步：修改flag参数和数据集合名称，先拉取数据到OBS 同时把需要补采的数据提取出来 等待采集组补采完后 再执行第二步清洗
  *    当flag为bu的时候，原始全量数据备份 以及需要补采的数据提取  此时 collection为全量集合的名称 mt_tg_detail
  *    当flag为all的时候，补采好的数据备份以及数据清洗等  此时 collection为全量集合的名称 mt_tg_detail_bu
  * @Modify By:
  */
object Mttg {

  // Mode switch — "bu": back-fill step (dump raw data and emit the re-crawl
  // list for the collection team); "all": monthly computation/cleaning step.
//  var flag = "bu"
  var flag = "all"

  // Platform identifier used in storage paths.
  var platform = "mttg"
  // Current batch period and the two preceding months.
  val year = 2020
  var month = 8
  var lastMonth = 7
  var last_lastMonth = 6
  var timeStamp = TimesYearAll.TIME202008  // fixed timestamp for this month's batch
  // MongoDB source database / collection.
  var database = "MT"
  var collection = "mt_tg_detail"

  // Raw data path of the current month's dump (original or back-filled).
  var sourcePathPart = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/${collection}"
  // All current-month data (mt_tg_detail plus the back-filled mt_tg_detail_bu).
  // NOTE(review): currently identical to sourcePathPart; the commented wildcard
  // variant below looks like the intended "both collections" path — confirm.
  var sourcePath = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/${collection}"
//  var sourcePath = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/*/*"
  // Output path of the re-crawl list handed to the collection team.
  var buPath = s"s3a://o2o-dataproces-group/chen_lixiu/2020/${month}/sourceBuCai/${platform}/bu"
  // Raw data of the previous month and the month before that.
  var lastMonthPath = s"s3a://o2o-sourcedata/obs-source-2020/${lastMonth}/${platform}/*/*"
  var last_lastMonthPath = s"s3a://o2o-sourcedata/obs-source-2020/${last_lastMonth}/${platform}/*/*"

  // Cleaned ("good") result paths of the previous five months, e.g.
  // o2o-dataproces-group/chen_lixiu/2019/10/mttg/good/
  var lastGoodPath1 = s"s3a://dws-data/g_data/${year}/${month-1}/meituan_tg/" // last month
  var lastGoodPath2 = s"s3a://dws-data/g_data/${year}/${month-2}/meituan_tg/" // 2 months back
  var lastGoodPath3 = s"s3a://dws-data/g_data/${year}/${month-3}/meituan_tg/" // 3 months back
  var lastGoodPath4 = s"s3a://dws-data/g_data/${year}/${month-4}/meituan_tg/" // 4 months back
  var lastGoodPath5 = s"s3a://dws-data/g_data/${year}/${month-5}/meituan_tg/" // 5 months back

  // Monthly address dimension table.
  var address = s"s3a://o2o-dimension-table/address_table/address_table_2020/${month}/address_platform/meituan_tg_address_2020_${month}/"
//  var address = s"s3a://o2o-dimension-table/address_table/address_table_2020/${month}/address_platform/meituan_tg_address_2020_${month}/*"
  // Third-level category id mapping.
  var catePath = "s3a://o2o-dimension-table/category_table/cate/cate0401/mttg/categoryId/*"
  // Cleaned goods output paths.
  var resultPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/good_sell_add_new"
  var sellPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/sell_good"
  var sell90Path = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/sell90_good"
  var sell180Path = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/sell180_good"
  // Newly-added goods output path.
  var newAddPath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}/newAdd"
  // Extracted shop output paths.
  var shopPath_json = s"s3a://o2o-sourcedata/obs_result_shop/${year}/${month}/${platform}"
  var shopPath_orc = s"s3a://dws-data/g_shop/${year}/${month}/meituan_tg"
  // Verification (data check) output root.
  var strPath = "s3a://o2o-dataproces-group/chen_lixiu/"

  /**
    * Entry point of the monthly Meituan group-buy cleaning job:
    *  1. pull the configured MongoDB collection, flatten the first "flavors"
    *     entry (platform category) onto each record, dump the batch to OBS;
    *  2. read the dump back, de-duplicate goods per sell-count field and derive
    *     the monthly increment (cumulative / 90-day / 180-day rules);
    *  3. tag, categorize and geocode the result, then persist it as ORC.
    */
  def main(args: Array[String]): Unit = {
      // Spark + MongoDB connector configuration.
      // NOTE(review): credentials are hard-coded here and below; move them to
      // configuration / secret storage.
      val spark = SparkSession.builder()
//        .master("local[*]")
        .appName("MongoSparkConnectorIntro")
        // Fix: the URI contained a stray space after '@' ("...!@ 192..."),
        // which is not a valid host segment in a MongoDB connection string.
        .config("spark.mongodb.input.uri", "mongodb://root:O2Odata123!@192.168.0.149:27017/admin")
        .config("spark.mongodb.input.database", s"${database}")
        .config("spark.mongodb.input.collection", s"${collection}")
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .getOrCreate()
      // OBS (S3-compatible) access configuration.
      val sc: SparkContext = spark.sparkContext
      sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
      sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
      sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
      sc.setLogLevel("ERROR")

      //=================================================================
      //=== 1. Pull the collection configured above and flatten the first
      //       "flavors" element (platform category) onto each record.
      val rdd = MongoSpark.load(sc)
      val values = rdd.map(line => {
        val nObject = JSON.parseObject(line.toJson())
        nObject.remove("_id")
        val firstFlavor = nObject.getJSONArray("flavors").getJSONObject(0)
        nObject.put("rootCategoryId", firstFlavor.get("rootCategoryId").toString)
        nObject.put("rootCategoryName", firstFlavor.get("rootCategoryName").toString)
        nObject.put("categoryId", firstFlavor.get("categoryId").toString)
        nObject.put("categoryName", firstFlavor.get("categoryName").toString)
        nObject
      })

      // Back the raw month up to OBS (saveAsTextFile fails if the path exists).
      values.repartition(1).saveAsTextFile(sourcePathPart)

      // === 2. Read the dump back and split by which sell-count field was
      // actually captured (!= -1). Back-filled collections can repeat a
      // good_id with diverging counters, so the maximum per good_id wins.
      val df_mttg = spark.read.json(sourcePathPart).cache()
      df_mttg.where("sellCount != '-1'").registerTempTable("sell")
      df_mttg.where("sellCount90 != '-1'").registerTempTable("sell90")
      df_mttg.where("sellCount180 != '-1'").registerTempTable("sell180")
      // Keep the row with the highest counter per good_id.
      val df_1 = spark.sql(
        """
          |select
          |*
          |from
          |    (select *, row_number() over(partition by good_id order by sellCount desc ) k1 from sell) t
          |where k1=1
      """.stripMargin
      ).drop("k1")
      val df_90 = spark.sql(
        """
          |select
          |*
          |from
          |    (select *, row_number() over(partition by good_id order by sellCount90 desc ) k1 from sell90) t
          |where k1=1
      """.stripMargin
      ).drop("k1")
      val df_180 = spark.sql(
        """
          |select
          |*
          |from
          |    (select *, row_number() over(partition by good_id order by sellCount180 desc ) k1 from sell180) t
          |where k1=1
      """.stripMargin
      ).drop("k1")

      println("====第一部分条数：======"+df_1.count())
      println("====第二部分条数：======"+df_90.count())
      println("====第三部分条数：======"+df_180.count())

      // Cumulative-count increment (differs against the raw previous months).
      val df_calcu: DataFrame = mttgCaculate(spark, df_1, s"${lastMonthPath}", s"${newAddPath}").cache()
      // 90-day (quarterly) rule.
      val df_calcu90: DataFrame = mttgCaculate_90(spark,df_90).where("sellCount>0").where("priceText>0").cache()
      // 180-day (half-year) rule.
      val df_calcu180: DataFrame = mttgCaculate_180(spark,df_180).where("sellCount>0").where("priceText>0").cache()

      // Merge all three slices; first occurrence per good_id wins
      // (cumulative > quarterly > half-year precedence via union order).
      val value_all = df_calcu.toJSON.rdd.union(df_calcu90.toJSON.rdd.union(df_calcu180.toJSON.rdd))
      val df_calcu_all = spark.read.json(value_all).where("sellCount>0").where("priceText>0").dropDuplicates("good_id").cache()

      // Step 2: tag with batch timestamp and platform identity.
      val df_label = df_calcu_all
        .withColumn("timeStamp", lit(s"${timeStamp}"))
        .withColumn("platformName", lit("美团团购"))
        .withColumn("platformId", lit("23"))
      // Step 3: attach internal category ids.
      val df_cate = mttgCate(spark, df_label)
      // Step 4: attach the address dimension.
      val df_addr = mttgAddress(spark, df_cate, address)

      // Step 5: persist the cleaned goods.
      df_addr.repartition(1).write.orc(resultPath)

      // Release all cached frames (df_mttg was previously never unpersisted).
      df_mttg.unpersist()
      df_calcu.unpersist()
      df_calcu90.unpersist()
      df_calcu180.unpersist()

  }

  /**
    * Pulls one MongoDB collection, flattens the first "flavors" entry
    * (platform category) onto each record and backs the raw batch up to OBS.
    *
    * NOTE(review): the `month` and `platform` parameters are not used in the
    * body — the output path comes from the object-level `sourcePathPart` —
    * confirm whether they should drive the path instead.
    *
    * @param month      batch month (currently unused, see note)
    * @param platform   platform identifier (currently unused, see note)
    * @param database   MongoDB database name fed to the connector
    * @param collection MongoDB collection name fed to the connector
    */
  def mongoExport(month: Int, platform: String, database: String, collection: String): Unit = {
    // Spark + MongoDB connector configuration.
    // NOTE(review): hard-coded credentials; move to configuration/secrets.
    val spark = SparkSession.builder()
//              .master("local[*]")
      .appName("MongoSparkConnectorIntro")
      // Fix: removed the stray space after '@' — it made the host segment of
      // the MongoDB connection string invalid.
      .config("spark.mongodb.input.uri", "mongodb://root:O2Odata123!@192.168.0.149:27017/admin")
      .config("spark.mongodb.input.database", s"${database}")
      .config("spark.mongodb.input.collection", s"${collection}")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // OBS (S3-compatible) access configuration.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")

    val rdd = MongoSpark.load(sc)
    val values = rdd.map(line => {
      val nObject = JSON.parseObject(line.toJson())

      // Flatten the first "flavors" element (platform category) to the top level.
      val firstFlavor = nObject.getJSONArray("flavors").getJSONObject(0)
      nObject.put("rootCategoryId", firstFlavor.get("rootCategoryId").toString)
      nObject.put("rootCategoryName", firstFlavor.get("rootCategoryName").toString)
      nObject.put("categoryId", firstFlavor.get("categoryId").toString)
      nObject.put("categoryName", firstFlavor.get("categoryName").toString)

      // (Historical, disabled) From November the crawl also carried
      // sellCount90 / sellCount180; when sellCount was -1 it used to be
      // approximated as sellCount90/3 or sellCount180/6.

      nObject.remove("_id")
      nObject
    })
      .cache()

    // 1. Back up the raw collection (stored as ORC from June onwards).
    values.repartition(1).saveAsTextFile(sourcePathPart)

    // 2. (Disabled) extract the re-crawl list for the collection team.
//    mttgBuCai(spark, sourcePathPart, lastMonthPath).repartition(1).write.json(s"${buPath}")
  }

  /** *
    * Builds the re-crawl (back-fill) list: shops whose goods appeared last
    * month but are missing from the current month's crawl. The shop info is
    * handed to the collection team so those shops can be crawled again.
    *
    * @param spark          active session
    * @param sourcePathPart current-month raw data path
    * @param lastMonthPath  previous-month raw data path
    * @return one row per shop to re-crawl (de-duplicated on shopId)
    */
  def mttgBuCai(spark: SparkSession, sourcePathPart: String, lastMonthPath: String): DataFrame = {

    // Current month, positive sales only.
    val currentMonth = spark.read.json(s"${sourcePathPart}").where("sellCount>0")
    currentMonth.registerTempTable("table_dang")

    // Previous month, positive sales only; its goods are diffed against the
    // current month to find what vanished.
    val previousMonth = spark.read.json(s"${lastMonthPath}").where("sellCount>0")
    previousMonth.registerTempTable("FEB1_0")

    // The previous month can contain back-filled duplicates per good_id; keep
    // the FIRST shop record (lowest sellCount) for every good.
    spark.sql(
      """
        |select
        |*
        |from
        |     (select
        |      *,
        |      row_number() over(partition by good_id order by sellCount) g
        |      from
        |      FEB1_0 ) t
        |where g=1
      """.stripMargin
    ).registerTempTable("table_shang")

    // Goods present last month but absent this month -> their shops need a
    // re-crawl; extract the shop attributes for the collection team.
    val missingShops = spark.sql(
      """
        |select
        |a.shopUrl,
        |a.shopImages,
        |a.shopName,
        |a.shopAvgscore,
        |a.shopCommentCount,
        |a.shopId,
        |a.shopSellCount,
        |a.flavors,
        |a.average_consume,
        |--11.27号添加该字段进行补采
        |a.loc_info
        |from
        |table_shang a
        |left join
        |table_dang b
        |on a.good_id=b.good_id
        |where b.good_id is NULL
      """.stripMargin
    )

    missingShops.dropDuplicates("shopId")
  }

  /** *
    * Monthly increment for goods that DO carry a cumulative sell count
    * (callers pre-filter with sellCount != '-1').
    *
    * Matched against last month: subtract the two cumulative counts.
    * Not matched: fall back to the month before last and halve the difference.
    * Matched nowhere: treated as newly-added goods (kept when sellCount > 0,
    * written to newAddPath).
    *
    * @param spark         active session
    * @param dataFrame     current-month goods (one row per good_id)
    * @param lastMonthPath previous-month raw data path
    * @param newAddPath    output path for the newly-added goods
    * @return union of all three slices; also persisted to sellPath
    */


  // Calculation rule for goods with a cumulative count (sellCount != '-1').
  def mttgCaculate(spark: SparkSession, dataFrame: DataFrame,lastMonthPath: String, newAddPath: String): DataFrame = {

    dataFrame.registerTempTable("month_6")
    //================= previous month's raw data (cumulative count captured)
    spark.read.json(lastMonthPath).where("sellCount !='-1'").registerTempTable("month_test2")
    // Keep the row with the highest cumulative count per good_id.
    spark.sql(
      """
        |select
        |*
        |from
        |  (select
        |  *,
        |  row_number() over(partition by good_id order by sellCount desc) g
        |  from
        |  month_test2 )t
        |where g=1
      """.stripMargin
    ).drop("g").registerTempTable("month_5")

    // Goods matched to last month: this month's increment is the difference of
    // the two cumulative counts (sellCount is a bigint).
    var frame_join1 = spark.sql(
      """
        |select
        |a.*,
        |--cast(ceil(cast(a.sellCount-b.sellCount as bigint)/42*30) as bigint) as sellCount_tmp
        |cast(ceil(cast(a.sellCount-b.sellCount as bigint)) as bigint) as sellCount_tmp
        |from
        |month_6 a
        |join
        |month_5 b
        |on a.good_id=b.good_id
      """.stripMargin
    ).drop("sellCount")
      .withColumnRenamed("sellCount_tmp", "sellCount")
    //========== goods NOT matched to last month fall back to the month before ==========
    // Unmatched goods.
    spark.sql(
      """
        |select
        |a.*
        |from
        |month_6 a
        |left join
        |month_5 b
        |on a.good_id = b.good_id
        |where b.good_id is null
      """.stripMargin
    ).registerTempTable("tab_nojoin")
    // Raw data of the month before last (cumulative count captured); best row
    // per good_id as above.
    spark.read.json(last_lastMonthPath).where("sellCount !='-1'").registerTempTable("month_last3")
    spark.sql(
      """
        |select
        |*
        |from
        |  (select
        |  *,
        |  row_number() over(partition by good_id order by sellCount desc) g
        |  from
        |  month_last3 )t
        |where g=1
      """.stripMargin
    ).drop("g").registerTempTable("month_7")
    // Matched two months back: difference spans two months, so halve it.
    var frame_join2 = spark.sql(
      """
        |select
        |a.*,
        |--cast(ceil(cast(ceil((a.sellCount-b.sellCount)/2.0) as bigint)/42*30) as bigint) as sellCount_tmp
        |cast(ceil(cast(ceil((a.sellCount-b.sellCount)/2.0) as bigint)) as bigint) as sellCount_tmp
        |from
        |tab_nojoin a
        |join
        |month_7 b
        |on a.good_id=b.good_id
      """.stripMargin
    ).drop("sellCount").withColumnRenamed("sellCount_tmp", "sellCount")
    // Matched neither month: treated as newly-added goods.
    var frame_nojoin = spark.sql(
      """
        |select
        |a.*
        |from
        |tab_nojoin a
        |left join
        |month_7 b
        |on a.good_id = b.good_id
        |where b.good_id is null
      """.stripMargin
    ).where("sellCount > 0")
    frame_nojoin.registerTempTable("no_join")
    println("==========新增的数据量===============")
    // Sanity totals for the newly-added slice.
    spark.sql(
      """
        |select
        |count(1),
        |cast(sum(sellCount) as bigInt) sum_sellcount,
        |cast(sum(sellCount*priceText) as bigInt) sum_salesamount
        |from
        |no_join
      """.stripMargin).show()
    // Persist the newly-added goods separately.
    frame_nojoin.where("sellCount>0").repartition(1).write.orc(s"${newAddPath}")

    println("本月数据路径："+sourcePath)
    println("上月数据路径："+lastMonthPath)
    println("上上月数据路径："+last_lastMonthPath)
    // (Since November, new goods may be excluded; see the commented union below.)
    println("关联到上月的条数："+frame_join1.count())
    println("关联到的上上月条数："+frame_join2.count())
    println("关联不到的条数："+frame_nojoin.count())

    // Union of: matched last month + matched two months back + newly added.
    val frame_all =  spark.read.json(frame_join1.toJSON.rdd.union(frame_join2.toJSON.rdd)
      .union(frame_nojoin.toJSON.rdd)) // includes newly-added goods

//    val frame_all =  spark.read.json(frame_join1.toJSON.rdd.union(frame_join2.toJSON.rdd)) // excludes newly-added goods

    println("sellCount部分的数据：")
    frame_all.registerTempTable("all")
    // Sanity totals for the cumulative-count slice.
        spark.sql(
          """
            |select
            |count(1),
            |cast(sum(sellCount) as bigInt) sum_sellcount,
            |cast(sum(sellCount*priceText) as decimal(20,2)) sum_salesamount
            |from
            |all
          """.stripMargin).show()
    // Persist the full cumulative-count result.
    frame_all.repartition(4).write.orc(sellPath)

    frame_all
  }

  /**
   * Sell-count estimate for goods WITHOUT a cumulative counter, based on the
   * quarterly field. Timeline (example starting November): 8---9---10---11,
   * so month_11 = sellCount90_11 - (good_10 + good_9).
   * Goods found in BOTH previous months' cleaned results get that sum
   * subtracted; all other goods fall back to sellCount90 / 3.
   */
  def mttgCaculate_90(spark: SparkSession,dataFrame: DataFrame): DataFrame = {

    dataFrame.drop("sellCount").registerTempTable("month_6")

    // Cleaned results of the two previous months (good_id -> sellCount).
    val prevMonth1 = spark.read.orc(lastGoodPath1).select("good_id","sellCount").dropDuplicates("good_id")
    prevMonth1.registerTempTable("month_t1")
    val prevMonth2 = spark.read.orc(lastGoodPath2).select("good_id","sellCount").dropDuplicates("good_id")
    prevMonth2.registerTempTable("month_t2")

    // Two-month sum for goods present in both previous months.
    spark.sql(
      """
        |select
        |a.good_id,
        |(a.sellCount + b.sellCount) as sellCount
        |from month_t1 a
        |join month_t2 b
        |on a.good_id = b.good_id
      """.stripMargin
    ).dropDuplicates("good_id").registerTempTable("month_5")

    // Matched goods: subtract the two-month sum; unmatched goods: average the
    // quarterly counter over three months.
    val quarterly = spark.sql(
      """
        |select
        |a.*,
        |case when b.good_id is not null then cast(a.sellCount90-b.sellCount as bigint)
        |     else  cast(ceil(a.sellCount90/3.0) as bigint ) end sellCount_tmp
        |from
        |month_6 a
        |left join
        |month_5 b
        |on a.good_id = b.good_id
        |--where b.good_id is null
      """.stripMargin
    ).withColumnRenamed("sellCount_tmp", "sellCount")

    // Sanity totals for the sellCount90 slice.
    println("sellCount90部分的数据：")
    quarterly.registerTempTable("all")
    spark.sql(
      """
        |select
        |count(1),
        |cast(sum(sellCount) as bigInt) sum_sellcount,
        |cast(sum(sellCount*priceText) as decimal(20,2)) sum_salesamount
        |from
        |all
          """.stripMargin).show()

    quarterly
  }

  /**
    * Sell-count estimate based on the half-year counter (sellCount180).
    * Goods whose cleaned results exist in the previous five months have that
    * five-month sum subtracted from sellCount180; all other goods fall back
    * to the plain average sellCount180 / 6.
    */
  def mttgCaculate_180(spark: SparkSession,dataFrame: DataFrame): DataFrame = {

    dataFrame.drop("sellCount").registerTempTable("month_6")

    //================= cleaned results of the five previous months
    spark.read.orc(lastGoodPath1).select("good_id","sellCount").dropDuplicates("good_id").registerTempTable("month_t1")
    spark.read.orc(lastGoodPath2).select("good_id","sellCount").dropDuplicates("good_id").registerTempTable("month_t2")
    spark.read.orc(lastGoodPath3).select("good_id","sellCount").dropDuplicates("good_id").registerTempTable("month_t3")
    spark.read.orc(lastGoodPath4).select("good_id","sellCount").dropDuplicates("good_id").registerTempTable("month_t4")
    spark.read.orc(lastGoodPath5).select("good_id","sellCount").dropDuplicates("good_id").registerTempTable("month_t5")

    // Five-month sum per good_id.
    // NOTE(review): the nested INNER joins keep only goods present in EVERY
    // one of the five months; anything else takes the /6 fallback below —
    // confirm that is intended.
    spark.sql(
      """
        |
        |select
        |a.good_id,
        |(a.sellCount + b.sellCount)as sellCount
        |from
        |     (select a.good_id,(a.sellCount + b.sellCount)as sellCount
        |     from
        |         (select a.good_id,(a.sellCount + b.sellCount)as sellCount
        |         from
        |           (select a.good_id,(a.sellCount + b.sellCount)as sellCount from month_t1 a join month_t2 b on a.good_id = b.good_id) a
        |            join month_t3 b on a.good_id = b.good_id) a
        |     join month_t4 b on a.good_id = b.good_id) a
        |join month_t5 b
        |on a.good_id = b.good_id
        |
      """.stripMargin
    ).dropDuplicates("good_id").registerTempTable("month_5")

    // Matched goods: subtract the five-month sum; unmatched goods: average the
    // half-year counter over six months.
    var frame34 = spark.sql(
      """
        |select
        |a.*,
        |case when b.good_id is not null then cast(a.sellCount180-b.sellCount as bigint)
        |     else cast(ceil(a.sellCount180/6.0) as bigint ) end sellCount_tmp
        |from
        |month_6 a
        |left join
        |month_5 b
        |on a.good_id = b.good_id
        |--where b.good_id is null
      """.stripMargin
    ).withColumnRenamed("sellCount_tmp", "sellCount")

    // Sanity totals for the sellCount180 slice.
    println("sellCount180部分的数据：")
    frame34.registerTempTable("all")
    spark.sql(
      """
        |select
        |count(1),
        |cast(sum(sellCount) as bigInt) sum_sellcount,
        |cast(sum(sellCount*priceText) as decimal(20,2)) sum_salesamount
        |from
        |all
          """.stripMargin).show()

    frame34
  }

  /** *
    * Attaches the internal four-level category ids to every good.
    *
    * Platform categories 38 and 65 are mapped by hard-coded rules (keyword
    * match on categoryName); everything else is joined against the category
    * dimension at catePath, falling back to placeholder ids
    * (10099 / 1009999 / 100999999 / 10099999999) when unmatched. Placeholder
    * ids at each level are then rewritten as <parent id> + "99".
    *
    * @param spark
    * @param frame labelled goods (must expose categoryId / categoryName)
    * @return goods with firstCategoryId..fourthCategoryId and salesAmount
    */
  def mttgCate(spark: SparkSession, frame: DataFrame): DataFrame = {

    // Categories handled by hard-coded split rules (38, 65).
    frame.where("categoryId ='38' or categoryId ='65'").registerTempTable("tab_chai")

    // Everything else is joined against the category dimension table.
    frame.where("categoryId !='38' and categoryId !='65'").registerTempTable("tab_join")
    /** *
      * Rule-mapped categories: 38 -> 10031 (桌游 keyword decides the second
      * level), 65 -> 10027 (婚庆 keyword decides the second level).
      */
    val frame9 = spark.sql(
      """
        |select
        |*,
        |case
        |when categoryId ='38' then '10031'
        |when categoryId ='65' then '10027'
        |else '10099' end
        |firstCategoryId,
        |case
        |when categoryId ='38' then (case when categoryName rlike '桌游' then '1003106' else '1003199' end)
        |when categoryId ='65' then (case when categoryName rlike '婚庆' then '1002709' else '1002704' end)
        |else '1009999' end
        |secondCategoryId,
        |'100999999' thirdCategoryId,
        |'10099999999' fourthCategoryId
        |from
        |tab_chai
      """.stripMargin)

    /** *
      * Dimension-joined categories; unmatched goods get the placeholders
      * 10099, 1009999, 100999999, 10099999999.
      */
    spark.read.json(catePath).dropDuplicates("categoryId")
      .registerTempTable("cate")
    var frame001 = spark.
      sql(
        """
          |select a.*,
          |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
          |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
          |"100999999" thirdCategoryId,
          |"10099999999" fourthCategoryId
          |from
          |tab_join a
          |left join
          |cate b
          |on a.categoryId = b.categoryId
        """.stripMargin
      )

    // NOTE: union is positional — both branches append the same four category
    // columns in the same order after the original columns.
    frame9.union(frame001).registerTempTable("JAN_100")

    // food_type is only derived for firstCategoryId 10028; other rows get
    // null here (the CASE has no ELSE). Also computes salesAmount.
    var dataAll = spark.sql(
      """
        |select
        |case
        |when firstCategoryId='10028'  then (case when secondCategoryId='1002802' or secondCategoryId='1002803' or secondCategoryId='1002806' then '01001'
        |                                         when secondCategoryId='1002801' then '01002'
        |                                         when secondCategoryId='1002805' then '01003' else '01999' end)
        |
        |end food_type,
        |*,
        |cast(sellCount*priceText as decimal(20,2)) salesAmount
        |from
        |JAN_100
      """.stripMargin
    ).drop("longitude", "latitude").drop("address").where("sellCount>0")

    dataAll.registerTempTable("f1")

    // Rewrite placeholder ids level by level so each placeholder becomes
    // <parent id> + "99" and the hierarchy stays navigable.
    spark.sql(
      """
        |select
        | *,
        | firstCategoryId as firstCategoryId1,
        | case when secondCategoryId = '1009999' then concat(firstCategoryId,'99') else secondCategoryId end secondCategoryId1
        | from
        | f1
      """.stripMargin)
      .registerTempTable("f2")
    spark.sql(
      """
        |select
        | *,
        | case when thirdCategoryId = '100999999' then concat(secondCategoryId1,'99') else  thirdCategoryId end thirdCategoryId1
        | from
        | f2
      """.stripMargin)
      .registerTempTable("f3")
    spark.sql(
      """
        |select
        | *,
        | case when fourthCategoryId = '10099999999' then concat(thirdCategoryId1,'99') else  fourthCategoryId end fourthCategoryId1
        | from
        | f3
      """.stripMargin)
      .registerTempTable("f4")
    // Drop the raw columns and promote the rewritten ones to the final names.
    var t0_1 = spark.sql(
      """
        |select
        |*
        |from
        |f4
      """.stripMargin).drop("firstCategoryId","secondCategoryId","thirdCategoryId","fourthCategoryId")
      .withColumnRenamed("firstCategoryId1","firstCategoryId")
      .withColumnRenamed("secondCategoryId1","secondCategoryId")
      .withColumnRenamed("thirdCategoryId1","thirdCategoryId")
      .withColumnRenamed("fourthCategoryId1","fourthCategoryId")

    t0_1
  }

  /** *
    * Joins the address dimension onto the goods frame by shopId. Shops without
    * an address record get placeholder values: '0' for most columns, '-1' for
    * latitude/longitude/aedzId/town.
    *
    * @param spark
    * @param frame
    * @param addressPath monthly address dimension path (JSON, one row per shopId)
    * @return goods with the address columns attached
    */
  def mttgAddress(spark: SparkSession, frame: DataFrame, addressPath: String): DataFrame = {
    frame.registerTempTable("all")

    val add = spark.read.json(addressPath).dropDuplicates("shopId")
    add.registerTempTable("address")
    // Left join; unmatched shops receive the platform default placeholders.
    // NOTE: the local `address` below shadows the object-level `address` path.
    var address = spark.sql(
      """
        |select t1.*,
        |case when t2.shopId is not null then t2.administrative_region  else  '0'                   end administrative_region,
        |case when t2.shopId is not null then t2.city  else  '0'                                      end  city,
        |case when t2.shopId is not null then t2.city_grade  else  '0'                                    end  city_grade,
        |case when t2.shopId is not null then t2.city_origin  else  '0'                               end  city_origin,
        |case when t2.shopId is not null then t2.district  else  '0'                                  end  district,
        |case when t2.shopId is not null then t2.district_origin  else  '0'                           end  district_origin,
        |case when t2.shopId is not null then t2.economic_division  else  '0'                             end  economic_division,
        |case when t2.shopId is not null then t2.if_city  else '0'                                        end  if_city,
        |case when t2.shopId is not null then t2.if_district  else  '0'                                   end  if_district,
        |case when t2.shopId is not null then t2.if_state_level_new_areas  else  '0'                      end  if_state_level_new_areas,
        |case when t2.shopId is not null then t2.poor_counties  else  '0'                                 end  poor_counties,
        |case when t2.shopId is not null then t2.province  else  '0'                                  end  province,
        |case when t2.shopId is not null then t2.regional_ID  else  '0'                              end  regional_ID,
        |case when t2.shopId is not null then t2.rural_demonstration_counties  else  '0'                  end  rural_demonstration_counties,
        |case when t2.shopId is not null then t2.rural_ecommerce  else  '0'                               end  rural_ecommerce,
        |case when t2.shopId is not null then t2.the_belt_and_road_city  else  '0'                        end  the_belt_and_road_city,
        |case when t2.shopId is not null then t2.the_belt_and_road_province  else  '0'                    end  the_belt_and_road_province,
        |case when t2.shopId is not null then t2.the_yangtze_river_economic_zone_city  else  '0'          end  the_yangtze_river_economic_zone_city,
        |case when t2.shopId is not null then t2.the_yangtze_river_economic_zone_province  else  '0'      end  the_yangtze_river_economic_zone_province,
        |case when t2.shopId is not null then t2.urban_agglomerations  else  '0'                          end  urban_agglomerations,
        |case when t2.shopId is not null then t2.address  else  '0'                                       end  address,
        |case when t2.shopId is not null then t2.latitude  else  '-1'                                     end latitude,
        |case when t2.shopId is not null then t2.longitude  else  '-1'                                    end longitude,
        |case when t2.shopId is not null then t2.aedzId  else  '-1'                                       end  aedzId,
        |case when t2.shopId is not null then t2.town  else '-1'                                          end  town
        | from all t1 left join address t2
        |on t1.shopId = t2.shopId
      """.stripMargin
    )

    address

  }


  /** *
    * Extracts one row per shop: shop/location attributes plus total sell count
    * and total sales amount aggregated over the shop's goods. Results are
    * written as JSON (shopPath_json) and ORC (shopPath_orc).
    *
    * NOTE(review): the `shopPath` parameter is never used — the output paths
    * come from the object-level fields; confirm whether it should drive the
    * writes instead.
    */
  def shopTiQu(spark: SparkSession, frame: DataFrame, shopPath: String): DataFrame = {
    frame.registerTempTable("FEB_5")
    // Per-shop aggregates over all of the shop's goods.
    val meituan_shop = spark.sql(
      """
        |select
        |shopId,
        |cast(sum(sellCount) as Long) totalSellCount,
        |cast(sum(salesAmount) as decimal(20,2)) totalSalesAmount
        |from
        |FEB_5
        |group by
        |shopId
      """.stripMargin)
    meituan_shop.registerTempTable("meituan_shop")

    /**
      * Shop attributes joined with the totals above.
      * (a.shopType was removed from the select on 04-11.)
      */
    val mttg_shop_all = spark.sql(
      """
        |select
        |
        |a.timeStamp,
        |a.platformId,
        |a.platformName,
        |a.goodRatePercentage,
        |a.shopImages,
        |a.opening_hours,
        |a.emotionalKeywords,
        |a.shopCommentCount,
        |cast(a.evaluates as String) evaluates,
        |a.shopId,
        |a.shopUrl,
        |a.shopName,
        |a.longitude,
        |a.latitude,
        |a.town,
        |a.aedzId,
        |cast(a.phone as String) phone,
        |a.shopSellCount,
        |a.address,
        |a.administrative_region,
        |a.city,
        |a.city_grade,
        |a.city_origin,
        |a.district,
        |a.district_origin,
        |a.economic_division,
        |a.if_city,
        |a.if_district,
        |a.if_state_level_new_areas ,
        |a.poor_counties,
        |a.province,
        |a.regional_ID,
        |a.rural_demonstration_counties ,
        |a.rural_ecommerce,
        |a.the_belt_and_road_city ,
        |a.the_belt_and_road_province ,
        |a.the_yangtze_river_economic_zone_city ,
        |a.the_yangtze_river_economic_zone_province,
        |a.urban_agglomerations,
        |
        |b.totalSellCount,
        |b.totalSalesAmount
        |
        |from FEB_5 a
        |left join
        |meituan_shop b
        |on a.shopId = b.shopId
      """.stripMargin)
      .dropDuplicates("shopId")

    // Persist the shop extract in both formats.
    mttg_shop_all.repartition(1).write.json(shopPath_json)
    mttg_shop_all.repartition(1).write.orc(shopPath_orc)

    mttg_shop_all

  }

  /** *
    * Writes verification aggregates for the cleaned goods frame as CSV:
    * platform-level totals, per-province/city/district totals and
    * per-category totals (used to cross-check the monthly output).
    *
    * @param frame cleaned goods (timeStamp/platformId/sellCount/salesAmount
    *              plus region and category columns)
    * @param spark active session
    */
  def checkMethod(frame: DataFrame, spark: SparkSession): Unit = {

    frame.registerTempTable("Mar_1")
    // (A MySQL output target existed historically; results now go to CSV.)

    // Platform totals: row count, total sales amount, total sell count.
    val t1 = spark.sql(
      """
        |select
        |timeStamp,
        |platformId,
        |count(1) as data_count,
        |sum(salesAmount) as sum_salesAmount,
        |sum(sellCount) as sum_sellCount
        |
        |from
        |Mar_1
        |group by platformId,timeStamp
      """.stripMargin)

    // Fix: the output paths hard-coded "2019" while this batch is configured
    // with year = 2020; derive the segment from the `year` field instead.
    t1.repartition(1).write.option("header","true").csv(s"${strPath}/data_checkout/${year}/${month}/${platform}/sales")

    // Sell count / sales amount / row count per province, city and district.
    val t2 = spark.sql(
      """
        |select
        |platformId,
        |timeStamp,
        |province,
        |city,
        |district,
        |count(1) dataCount,
        |sum(salesAmount) salesAmount,
        |sum(sellCount) sellCount
        |from Mar_1
        |group by province,city,timeStamp,platformId,district
      """.stripMargin)

    t2.repartition(1).write.option("header","true").csv(s"${strPath}/data_checkout/${year}/${month}/${platform}/province")

    // Sell count / sales amount / row count per category level
    // (uses spark.sql directly, consistent with t1 and t2).
    val t3 = spark.sql(
      """
        |select
        |platformId,
        |timeStamp,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |count(1) as cate_Count,
        |sum(sellCount) cate_sellCount,
        |sum(salesAmount) cate_salesAmount
        |
        |from Mar_1
        |group by firstCategoryId,secondCategoryId,thirdCategoryId,platformId,timeStamp
      """.stripMargin)

    t3.repartition(1).write.option("header","true").csv(s"${strPath}/data_checkout/${year}/${month}/${platform}/category")
  }

}
