package com.o2o.cleaning.month.platform.ebusiness_plat.dazhongdp

import com.alibaba.fastjson.JSON
import com.mongodb.spark.MongoSpark
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 *  2020年四月数据异常
 *  原因：采集到的非美食商品存在‘半年售的销量’；
 *        美食餐饮行业还是累计销量
 *  本次计算口径：
 *        非美食商品:关联到前5个月的结果的数据除以6进行计算销量
 *        美食商品还是按照累计销量计算：关联上个月相减，关联不到的关联上上个月相减除以2，关联不到的作为新增但是不加到结果中去
  */
object DaZhongdp_2004 {

  var collection = "dzdp_detail"
  //平台名称
  var platform = "dzdp"
  //当月的月份
  var month = "5"
  var lastMonth = "4"
  var last2Month = "3"
  //每个月固定时间戳
  var timeStamp = TimesYearAll.TIME202005
  //mongo库中的库名称
  var database = "Dzdp"

  //补采或者当月原始数据路径
  var sourcePathPart = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/${collection}"
  //当月所有数据（包括原始的和补采的数据）
  var sourcePath = s"s3a://o2o-sourcedata/obs-source-2020/${month}/${platform}/*/*"
  //上月数据路径
  var lastMonthPath = s"s3a://o2o-sourcedata/obs-source-2020/${lastMonth}/${platform}/*/*"
  //上上月数据路径
  var last2MonthPath = s"s3a://o2o-sourcedata/obs-source-2020/${last2Month}/${platform}/*/*"
  //补采路径
  var buPath = s"s3a://o2o-dataproces-group/chen_lixiu/2020/${month}/sourceBuCai/${platform}/bu"
  //地址路径    3月份地址没有新增 但是更新了开发区
  var address = s"s3a://o2o-dimension-table/address_table/address_table_2020/1/address_platform/dzdp_address_2020_1/*"
  //二级id分类路径
  var catePath = "s3a://o2o-dimension-table/category_table/cate/cate0401/dzdp/categoryId/*"
  //name分类路径
  var cateNamePath = "s3a://o2o-dimension-table/category_table/cate/cate0401/dzdp/categoryName/*"
  //新增数据路径
  var newAddPath = s"s3a://o2o-dataproces-group/chen_lixiu/2020/${month}/${platform}/newAdd"
  //商品的清洗结果路径
  var resultPath = s"s3a://o2o-dataproces-group/chen_lixiu/2020/${month}/${platform}/good"
  //提取的店铺路径
  var shopPath_json = s"s3a://o2o-sourcedata/obs_result_shop/2020/${month}/dazhongdp"
  var shopPath_orc = s"s3a://dws-data/g_shop/2020/${month}/dazhongdp"
  //校验路径
  var strPath = "s3a://o2o-dataproces-group/chen_lixiu/"

  def main(args: Array[String]): Unit = {

    //第一步：拉取原始数据
      //spark mongo连接配置
      val spark = SparkSession.builder()
//        .master("local[*]")
        .appName("MongoSparkConnectorIntro")
        .config("spark.mongodb.input.uri", "mongodb://root:O2Odata123!@ 192.168.0.149:27017/admin")
        .config("spark.mongodb.input.database", s"${database}")
        .config("spark.mongodb.input.collection", s"${collection}")
        .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .getOrCreate()
      //obs设置
      var sc: SparkContext = spark.sparkContext
      sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
      sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
      sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
      sc.setLogLevel("ERROR")

    //第一步拉取补采数据
    mongoExport(spark,sc,month, platform, database, collection)
    //第二步：计算加清洗
    //美食类数据计算--累计销量
  var df_caucl_1: DataFrame = dzdpCaculate(spark, s"${sourcePath}", s"${lastMonthPath}", s"${newAddPath}").where("sellCount>0").where("priceText>0").cache()

    val path1 = "s3a://dws-data/g_data/2020/4/dazhongdp/"
    val path5 = "s3a://dws-data/g_data/2020/3/dazhongdp/"
    val path2 = "s3a://dws-data/g_data/2020/2/dazhongdp/"
    val path3 = "s3a://dws-data/g_data/2020/1/dazhongdp/"
    val path4 = "s3a://dws-data/g_data/2019/12/dazhongdp/"

    spark.read.json(sourcePath).where("rootCategoryId not in (10,50,20,80,55,90,40)").registerTempTable("all")
    val df1 =  spark.read.orc(path1).select("good_id","sellCount")
    val df2 =  spark.read.orc(path2).select("good_id","sellCount")
    val df3 =  spark.read.orc(path3).select("good_id","sellCount")
    val df4 =  spark.read.orc(path4).select("good_id","sellCount")
    val df5 =  spark.read.orc(path5).select("good_id","sellCount")

    df1.union(df2.union(df3.union(df4.union(df5)))).dropDuplicates("good_id").registerTempTable("old")
    //关联到的
    val df_res = spark.sql(
      """
        |select
        |a.*,
        |cast (ceil(a.sellCount/6.0) as bigint) sellCount_tmp
        |--cast (ceil((a.sellCount - b.sellCount)/6) as bigint) sellCount_tmp
        |from all a
        |join
        |(select good_id,sum(sellCount) sellCount from old group by good_id) b
        |on a.good_id=b.good_id
        |""".stripMargin)
      .drop("sellCount").withColumnRenamed("sellCount_tmp","sellCount")
    //关联不到的
    val df_res2 = spark.sql(
      """
        |select
        |a.*
        |from all a
        |left join
        |(select good_id,sum(sellCount) sellCount from old group by good_id) b
        |on a.good_id=b.good_id
        |where b.good_id is null
        |""".stripMargin)

    val df_caucl_2 = df_res.where("sellCount >'0'")

    val df_caucl = spark.read.json(df_caucl_1.toJSON.rdd.union(df_caucl_2.toJSON.rdd))
    //非美食类数据计算--半年销量
    //第三步：打标签
     var df_label = df_caucl
        .withColumn("timeStamp", lit(s"${timeStamp}")).withColumn("platformName", lit("大众点评"))
        .withColumn("platformId", lit("24"))
      //第三步：关联分类
      val df_cate = dzdpCate(spark, df_label)
      //第四步：关联地址
      val df_addr = dzdpAddress(spark, df_cate, address)
      //第五步：商品数据落地
      df_addr.repartition(1).write.json(resultPath)
      //第六步：提取店铺
//      val frame3 = shopTiQu(spark, df_addr, shopPath_json)
      //释放df_caucl的cache
      df_caucl.unpersist
  }

  /**
    * 从mongo中拉取数据到OBS
    *
    * @param month
    * @param platform
    * @param collection
    */
  def mongoExport(spark:SparkSession,sc:SparkContext,month: String, platform: String, database: String, collection: String): Unit = {
    //spark mongo连接配置

    val rdd = MongoSpark.load(sc)
    val values = rdd.map(line => {
      val nObject = JSON.parseObject(line.toJson())

      //去掉MongoDB中的索引id
      nObject.remove("_id")

      //扁平化flavors里面的分类字段
      val flavors = nObject.getJSONArray("flavors")

      val rootCategoryId = flavors.getJSONObject(0).get("rootCategoryId").toString
      val rootCategoryName = flavors.getJSONObject(0).get("rootCategoryName").toString
      val categoryId = flavors.getJSONObject(0).get("categoryId").toString
      val categoryName = flavors.getJSONObject(0).get("categoryName").toString

      nObject.put("rootCategoryId",rootCategoryId)
      nObject.put("rootCategoryName",rootCategoryName)
      nObject.put("categoryId",categoryId)
      nObject.put("categoryName",categoryName)

      nObject
    })
    values.repartition(1).saveAsTextFile(sourcePathPart)

  }

  /** *
    * 商品计算，关联上的数据相减 取有效数据(销量大于0) 关联不上的数据当做新增 同时把数据中的分类ID和name提取出来
    *
    * @param spark
    * @param sourcePath    当月数据
    * @param lastMonthPath 上月数据
    * @param newAddPath    新增数据
    * @return
    */
  def dzdpCaculate(spark: SparkSession, sourcePath: String, lastMonthPath: String, newAddPath: String): DataFrame = {

    //---------------------当月数据
    var uu = spark.read.json(s"${sourcePath}").where("rootCategoryId in (10,50,20,80,55,90,40)").registerTempTable("month_test1")
    spark.sql(
      """
        |select * from
        |   (select *, row_number() over(partition by good_id order by sellCount desc) k1 from month_test1 )
        |where k1=1
      """.stripMargin
    ).drop("k1").drop("k").registerTempTable("month_6")
    //-----------------上月数据
    var uy = spark.read.json(s"${lastMonthPath}").registerTempTable("month_test2")
    spark.sql(
      """
        |select * from
        |         (select *, row_number() over(partition by good_id order by sellCount desc ) k1
        |from month_test2 )t where k1=1
      """.stripMargin
    ).drop("k1").registerTempTable("month_5")

    //关联到的累计销量相减：当月-上月
    var frame35 = spark.sql(
      """
        |select
        |a.*,
        |cast((a.sellCount-b.sellCount) as bigint ) as sellCount_tmp
        |from
        |month_6 a
        |join
        |month_5 b
        |on a.good_id = b.good_id
      """.stripMargin
    ).drop("sellCount").withColumnRenamed("sellCount_tmp", "sellCount")

    //-----------------关联不到关联上上个月
    spark.sql(
      """
        |select
        |a.*,
        |cast(a.sellCount as bigint ) as sellCount_tmp
        |from
        |month_6 a
        |left join
        |month_5 b
        |on a.good_id=b.good_id
        |where b.good_id is null
      """.stripMargin
    ).drop("sellCount").withColumnRenamed("sellCount_tmp", "sellCount").registerTempTable("t_nojoin2")
    //-----------------------上上月数据
    var uy2 = spark.read.json(s"${last2MonthPath}").where("rootCategoryId='10'").registerTempTable("month_test3")
    spark.sql(
      """
        |select * from
        |            (select *, row_number() over(partition by good_id order by sellCount desc ) k1
        |            from month_test3 )t where k1=1
      """.stripMargin
    ).drop("k1").registerTempTable("month_4")

    var frame34 = spark.sql(
      """
        |select
        |a.*,
        |cast(ceil((a.sellCount-b.sellCount)/2.0) as bigint) as sellCount_tmp
        |from
        |t_nojoin2 a
        |join
        |month_4 b
        |on a.good_id=b.good_id
      """.stripMargin
    ).drop("sellCount")
      .withColumnRenamed("sellCount_tmp", "sellCount")

    //关联不到作为新增
    var frame36 = spark.sql(
      """
        |select
        |a.*,
        |cast(a.sellCount as bigint ) as sellCount_tmp
        |from
        |t_nojoin2 a
        |left join
        |month_4 b
        |on a.good_id=b.good_id
        |where b.good_id is null
      """.stripMargin
    ).drop("sellCount").withColumnRenamed("sellCount_tmp", "sellCount")

    println("==========新增的数据量===============")
    frame36.registerTempTable("newAdd")
    spark.sql(
      """
        |select
        |count(1),
        |cast(sum(sellCount) as bigInt),
        |cast(sum(sellCount*priceText) as bigInt)
        |sell
        |from
        |newAdd
      """.stripMargin).show()

    frame36.repartition(1).write.json(s"${newAddPath}")

    println("本月数据路径："+sourcePath)
    println("上月数据路径："+lastMonthPath)
    println("上上月数据路径："+last2MonthPath)

    println("关联到上月的条数："+frame35.count())
    println("关联到上上月的条数："+frame34.count())
    println("新增数据的条数："+frame36.count())

    val frame33 = frame35.union(frame34)
    frame33  //加上关联上上个月的数据
  }

  /** *
    * 关联分类
    *
    * @param spark
    * @param frame
    * @return 关联好分类的所有商品信息
    */
  def dzdpCate(spark: SparkSession, frame: DataFrame): DataFrame = {
    /** *
      * 整理需要匹配的数据分类
      */
    frame.where("categoryId  ='112' or categoryId ='33808' or categoryId='5804'").registerTempTable("extend_table_th1_rlike")

    frame.where("categoryId !='112' and categoryId !='33808' and categoryId!='5804'").registerTempTable("extend_table_th1")

    //拆分表：extend_table_th1_rlike
    val frame9 = spark.sql(
      """
        |select
        |case
        |when categoryId ='112'  then (case when title rlike '快餐便当' or title  rlike '快餐'      or title rlike '便当'          or shopName rlike '快餐' or shopName rlike '便当'  then '10028' else '10028' end)
        |when categoryId ='33808'then (case when title rlike '国内'    or title  rlike '国内游' then '10032' else '10032' end)
        |when categoryId ='5804' then (case when title rlike '艺术'    or title  rlike '艺术培训'   or shopName rlike '艺术'       or shopName rlike '艺术培训' then '10030'
        |                                   when title rlike '外语'    or title  rlike '外语培训'   or shopName rlike '外语'       or shopName rlike '外语培训' then '10030'
        |                                   when title rlike '驾校'    or title  rlike '驾校培训'   or shopName rlike '驾校'       or shopName rlike '驾校培训' then '10030'
        |											              when title rlike '职业技术培训' or title rlike '职业技术'or shopName rlike '职业技术培训' or shopName rlike '职业技术' then '10030'
        |											              when title rlike '留学服务' or title  rlike '留学' then '10030'
        |											              when title rlike '升学辅导' or title  rlike '升学'      or shopName rlike '升学'        or shopName rlike '升学辅导' then '10030'
        |											              when title rlike '兴趣生活' or title  rlike '兴趣'      or shopName rlike '兴趣生活' then '10030'   else '10030' end)
        |
        |else '10099'
        |end firstCategoryId,
        |
        |
        |case
        |when categoryId ='112'  then (case when title rlike '快餐便当' or title  rlike '快餐'       or title rlike '便当'          or shopName rlike '快餐' or shopName rlike '便当'  then '1002801' else '1002804' end)
        |when categoryId ='33808'then (case when title rlike '国内'     or title rlike '国内游' then '1003201' else '1003202' end)
        |when categoryId ='5804' then (case when title rlike '艺术'     or title rlike '艺术培训'    or shopName rlike '艺术'       or shopName rlike '艺术培训' then '1003001'
        |                                   when title rlike '外语'     or title rlike '外语培训'    or shopName rlike '外语'       or shopName rlike '外语培训' then '1003002'
        |                                   when title rlike '驾校'     or title rlike '驾校培训'    or shopName rlike '驾校'       or shopName rlike '驾校培训' then '1003003'
        |											              when title rlike '职业技术培训' or title rlike '职业技术' or shopName rlike '职业技术培训' or shopName rlike '职业技术' then '1003004'
        |											              when title rlike '留学服务'  or title rlike '留学' then '1003005'
        |											              when title rlike '升学辅导'  or title rlike '升学'       or shopName rlike '升学'        or shopName rlike '升学辅导' then '1003006'
        |											              when title rlike '兴趣生活'  or title rlike '兴趣'       or shopName rlike '兴趣生活' then '1003007' else '1003099' end)
        |
        |else '1009999'
        |end secondCategoryId,
        |*
        |
        |from
        |extend_table_th1_rlike
      """.
        stripMargin).withColumn("thirdCategoryId", lit("100999999")).withColumn("fourthCategoryId", lit("10099999999"))


    /** *
      * 根据ID关联分类表   表extend_table_th1匹配分类表
      */
    spark.read.json(s"${catePath}").dropDuplicates("categoryId").registerTempTable("cate")
    var g_0021 = spark.sql(
      """
        |select
        |b.firstCategoryId firstCategoryId,
        |b.secondCategoryId secondCategoryId,
        |a.*
        |from
        |extend_table_th1 a
        |join
        |cate b
        |on a.categoryId=b.categoryId
      """.
        stripMargin
    ).withColumn("thirdCategoryId", lit("100999999")).withColumn("fourthCategoryId", lit("10099999999"))

    /** *
     * 根据name关联分类
     */
    spark.sql(
      """
        |select a.*
        |from
        |extend_table_th1 a
        |left join
        |cate b
        |on a.categoryId=b.categoryId
        |where b.categoryId is NULL
      """.stripMargin
    ).registerTempTable("no_join_cateID")

    spark.read.json(s"${cateNamePath}").dropDuplicates("categoryName").registerTempTable("name_cate")

    var g_0022 = spark.sql(
      """
        |select
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |a.*
        |from
        |no_join_cateID a
        |left join
        |name_cate b
        |on concat(a.rootCategoryName,a.categoryName) = b.categoryName
      """.stripMargin
    ).withColumn("thirdCategoryId", lit("100999999")).
      withColumn("fourthCategoryId", lit("10099999999"))

    /** *
      * 修改100999999
      */
    g_0021.union(g_0022).union(frame9).registerTempTable("JAN_100")
    var dataAll = spark.sqlContext.sql(
      """
        |select
        |case
        |when firstCategoryId='10028'  then (case when secondCategoryId='1002802' or secondCategoryId='1002803'  or secondCategoryId='1002806' then '01001'
        |                                         when secondCategoryId='1002801' then '01002'
        |                                         when secondCategoryId='1002805' then '01003'  else '01999' end)
        |
        |end food_type,
        |
        |*,
        |cast(sellCount*priceText as decimal(20,2)) salesAmount
        |from
        |JAN_100
      """.stripMargin
    ).drop("longitude", "latitude").drop("address")


    //修改100999999
    dataAll.registerTempTable("f1")

    spark.sql(
      """
        |select
        | *,
        | firstCategoryId as firstCategoryId1,
        | case when secondCategoryId = '1009999' then concat(firstCategoryId,'99') else secondCategoryId end secondCategoryId1
        | from
        | f1
      """.stripMargin)
      .registerTempTable("f2")
    spark.sql(
      """
        |select
        | *,
        | case when thirdCategoryId = '100999999' then concat(secondCategoryId1,'99') else  thirdCategoryId end thirdCategoryId1
        | from
        | f2
      """.stripMargin)
      .registerTempTable("f3")
    spark.sql(
      """
        |select
        | *,
        | case when fourthCategoryId = '10099999999' then concat(thirdCategoryId1,'99') else  fourthCategoryId end fourthCategoryId1
        | from
        | f3
      """.stripMargin)
      .registerTempTable("f4")
    var t0_1 = spark.sql(
      """
        |select
        |*
        |from
        |f4
      """.stripMargin).drop("firstCategoryId","secondCategoryId","thirdCategoryId","fourthCategoryId")
      .withColumnRenamed("firstCategoryId1","firstCategoryId")
      .withColumnRenamed("secondCategoryId1","secondCategoryId")
      .withColumnRenamed("thirdCategoryId1","thirdCategoryId")
      .withColumnRenamed("fourthCategoryId1","fourthCategoryId")

    t0_1

  }

  /** *
    * 关联地址
    *
    * @param spark
    * @param frame
    * @param addressPath 每个月处理的地址路径
    * @return 关联好地址的所有商品信息
    */
  def dzdpAddress(spark: SparkSession, frame: DataFrame, addressPath: String): DataFrame = {
    frame.registerTempTable("all")
    val add = spark.read.json(addressPath).dropDuplicates("shopId")
    add.registerTempTable("address")
    //关联不到的地址打上自营的信息
    var address = spark.sql(
      """
        |select t1.*,
        |case when t2.shopId is not null then t2.administrative_region  else  '0'                   end administrative_region,
        |case when t2.shopId is not null then t2.city  else  '0'                                      end  city,
        |case when t2.shopId is not null then t2.city_grade  else  '0'                                    end  city_grade,
        |case when t2.shopId is not null then t2.city_origin  else  '0'                               end  city_origin,
        |case when t2.shopId is not null then t2.district  else  '0'                                  end  district,
        |case when t2.shopId is not null then t2.district_origin  else  '0'                           end  district_origin,
        |case when t2.shopId is not null then t2.economic_division  else  '0'                             end  economic_division,
        |case when t2.shopId is not null then t2.if_city  else '0'                                        end  if_city,
        |case when t2.shopId is not null then t2.if_district  else  '0'                                   end  if_district,
        |case when t2.shopId is not null then t2.if_state_level_new_areas  else  '0'                      end  if_state_level_new_areas,
        |case when t2.shopId is not null then t2.poor_counties  else  '0'                                 end  poor_counties,
        |case when t2.shopId is not null then t2.province  else  '0'                                  end  province,
        |case when t2.shopId is not null then t2.regional_ID  else  '0'                              end  regional_ID,
        |case when t2.shopId is not null then t2.rural_demonstration_counties  else  '0'                  end  rural_demonstration_counties,
        |case when t2.shopId is not null then t2.rural_ecommerce  else  '0'                               end  rural_ecommerce,
        |case when t2.shopId is not null then t2.the_belt_and_road_city  else  '0'                        end  the_belt_and_road_city,
        |case when t2.shopId is not null then t2.the_belt_and_road_province  else  '0'                    end  the_belt_and_road_province,
        |case when t2.shopId is not null then t2.the_yangtze_river_economic_zone_city  else  '0'          end  the_yangtze_river_economic_zone_city,
        |case when t2.shopId is not null then t2.the_yangtze_river_economic_zone_province  else  '0'      end  the_yangtze_river_economic_zone_province,
        |case when t2.shopId is not null then t2.urban_agglomerations  else  '0'                          end  urban_agglomerations,
        |case when t2.shopId is not null then t2.address  else  '0'                                       end  address,
        |case when t2.shopId is not null then t2.latitude  else  '-1'                                     end latitude,
        |case when t2.shopId is not null then t2.longitude  else  '-1'                                    end longitude,
        |case when t2.shopId is not null then t2.aedzId  else  '-1'                                       end  aedzId,
        |case when t2.shopId is not null then t2.town  else '-1'                                          end  town
        | from all t1 left join address t2
        |on t1.shopId = t2.shopId
      """.stripMargin
    )

    address

  }


  /** *
    * 提取店铺
    *
    * @param spark
    * @param frame
    * @param shopPath 提取的店铺路径
    */
  def shopTiQu(spark: SparkSession, frame: DataFrame, shopPath: String): DataFrame = {
    frame.registerTempTable("FEB_5")

    //根据各个shopId下的销售额和销售量
    val dzdp_shop = spark.sql(
      """
        |select
        |shopId,
        |cast(sum(sellCount) as Long) totalSellCount,
        |cast(sum(salesAmount) as double) totalSalesAmount
        |from
        |FEB_5
        |group by shopId
      """.stripMargin)
    dzdp_shop.registerTempTable("dzdp_shop")

    val dzdp_shop_all = spark.sql(
      """
        |select
        |
        |a.timeStamp,
        |a.platformId,
        |a.platformName,
        |a.goodRatePercentage,
        |a.shopImages,
        |a.opening_hours,
        |a.emotionalKeywords,
        |a.shopCommentCount,
        |cast(a.evaluates as String) evaluates,
        |a.shopId,
        |a.shopUrl,
        |a.shopName,
        |a.longitude,
        |
        |a.latitude,
        |a.town,
        |a.aedzId,
        |cast(a.phone as String) phone,
        |a.address,
        |a.administrative_region,
        |a.city,
        |a.city_grade,
        |a.city_origin,
        |a.district,
        |a.district_origin,
        |a.economic_division,
        |a.if_city,
        |a.if_district,
        |a.if_state_level_new_areas ,
        |a.poor_counties,
        |a.province,
        |a.regional_ID,
        |a.rural_demonstration_counties ,
        |a.rural_ecommerce,
        |a.the_belt_and_road_city ,
        |a.the_belt_and_road_province ,
        |a.the_yangtze_river_economic_zone_city ,
        |a.the_yangtze_river_economic_zone_province,
        |a.urban_agglomerations,
        |
        |b.totalSellCount,
        |b.totalSalesAmount
        |
        |from FEB_5 a
        |left join
        |dzdp_shop b
        |on a.shopId = b.shopId
      """.stripMargin)
      .dropDuplicates("shopId")
    dzdp_shop_all.repartition(1).write.json(shopPath_json)
    dzdp_shop_all.repartition(1).write.orc(shopPath_orc)

    dzdp_shop_all

  }


  /** *
    * 校验分类条数，总销量销售额，省市县等销量销售额
    *
    * @param frame 结果数据
    * @param spark
    */
  def checkMethod(frame: DataFrame, spark: SparkSession): Unit = {

    frame.registerTempTable("Mar_1")
    /*//mysql连接
    val prop = new Properties()
    prop.put("user", "root")
    prop.put("password", "123456")*/

    //统计平台的总销售额，总销量，总条数。
    val t1 = spark.sql(
      """
        |select
        |platformId,
        |sum(salesAmount) as sum_salesAmount,
        |sum(sellCount) as sum_sellCount,
        |count(1) as data_count,
        |timeStamp from
        |Mar_1
        |group by platformId,timeStamp
      """.stripMargin)
//    t1.write.mode("append").jdbc("jdbc:mysql://192.168.0.2:3306/O2Odata_View?useUnicode=true&characterEncoding=utf8",
//      "O2Odata_View.sell_sales_table", prop)

    t1.repartition(1).write.option("header","true").csv(s"${strPath}/data_checkout/2020/${month}/${platform}/sales")
//    t1.repartition(1).write.option("header","true").json(s"${strPath}/2019/checkout/sales/${month}/${platform}.json")

    // 统计各省市县下的销售量、销售额、条数
    val t2 = spark.sql(
      """
        |select
        |province,
        |city,
        |sum(salesAmount) salesAmount,
        |sum(sellCount) sellCount,
        |count(1) dataCount,
        |district,
        |timeStamp,
        |platformId
        |from Mar_1
        |group by province,city,timeStamp,platformId,district
      """.stripMargin)
//    t2.write.mode("append").jdbc("jdbc:mysql://192.168.0.2:3306/O2Odata_View?useUnicode=true&characterEncoding=utf8",
//      "O2Odata_View.province_table", prop)

    t2.repartition(1).write.option("header","true").csv(s"${strPath}/data_checkout/2020/${month}/${platform}/province")
//    t2.repartition(1).write.option("header","true").json(s"${strPath}/2019/checkout/province/${month}/${platform}.json")

    // 统计各个分类下的销售量、销售额、条数
    val t3 = spark.sqlContext.sql(
      """
        |select
        |platformId,
        |timeStamp,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |sum(sellCount) cate_sellCount,
        |sum(salesAmount) cate_salesAmount,
        |count(1) as cate_Count
        |from Mar_1
        |group by firstCategoryId,secondCategoryId,thirdCategoryId,platformId,timeStamp
      """.stripMargin)
//    t3.write.mode("append").jdbc("jdbc:mysql://192.168.0.2:3306/O2Odata_View?useUnicode=true&characterEncoding=utf8",
//      "O2Odata_View.thirdcategroy_table", prop)

    t3.repartition(1).write.option("header","true").csv(s"${strPath}/data_checkout/2020/${month}/${platform}/category")
//    t3.repartition(1).write.option("header","true").json(s"${strPath}/2019/checkout/category/${month}/${platform}.json")
  }

}

