package com.o2o.cleaning.month.platform.ebusiness_plat

import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}

object TiQu_shop {

  /**
   * Entry point. For each supported platform, reads the month's raw ORC data from
   * OBS (via the s3a connector), aggregates per-shop sales, and writes a shop-level
   * extract to both JSON and ORC output paths.
   *
   * Note: paths/year/month are hard-coded; edit `platformArr`, `year`, `month` below
   * to run a different slice.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("test_es")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .config("cluster.name", "O2OElastic")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // `sc` is never reassigned, so it is a val (was a var).
    val sc: SparkContext = spark.sparkContext
    // SECURITY: credentials are hard-coded in source. Move them to a credentials
    // provider / environment configuration — do not commit secrets to the repo.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")
    /*============================================================================================*/
    // NOTE: if a path in the code contains '/*', wrapping it in a '/** ... */' block
    // comment changes the comment's extent — the '/*' inside the path participates
    // in the comment matching.

    val platformArr = Array("dazhongdp", "meituan_tg")
    val year = "2020"
    val month = "8"
    for (platform <- platformArr) {
      val orc_path = s"s3a://dws-data/g_data/${year}/${month}/${platform}/"
      val df_all = spark.read.orc(orc_path)

      // Both branches used identical path templates, so they are built once here.
      val shopPath_json = s"s3a://o2o-sourcedata/obs_result_shop/${year}/${month}/${platform}"
      val shopPath_orc = s"s3a://dws-data/g_shop/${year}/${month}/${platform}"

      // Idiomatic pattern match instead of a string-equality if/else-if chain.
      platform match {
        case "dazhongdp"  => shopTiQu_dzdp(spark, df_all, shopPath_json, shopPath_orc)
        case "meituan_tg" => shopTiQu_mttg(spark, df_all, shopPath_json, shopPath_orc)
        case _            => // unknown platform: skipped (same as the original's behavior)
      }
    }

    // spark.stop() also stops the underlying SparkContext; the original's extra
    // sc.stop() afterwards was redundant and has been removed.
    spark.stop()
  }


  /*==================================*/

  /**
   * Extracts shop-level records for Meituan group-buy (美团团购) data.
   *
   * Aggregates `sellCount`/`salesAmount` per `shopId`, left-joins the totals back
   * onto the raw rows, de-duplicates by `shopId`, and writes the result as both
   * JSON and ORC (4 partitions each).
   *
   * @param frame         raw monthly rows for the platform
   * @param shopPath_json JSON output path
   * @param shopPath_orc  ORC output path
   * @return the de-duplicated shop-level DataFrame that was written
   */
  def shopTiQu_mttg(spark: SparkSession, frame: DataFrame, shopPath_json: String, shopPath_orc: String): DataFrame = {
    // createOrReplaceTempView replaces the registerTempTable API deprecated in Spark 2.0.
    frame.createOrReplaceTempView("FEB_5")
    val meituan_shop = spark.sql(
      """
        |select
        |shopId,
        |cast(sum(sellCount) as Long) totalSellCount,
        |cast(sum(salesAmount) as decimal(20,2)) totalSalesAmount
        |from
        |FEB_5
        |group by
        |shopId
      """.stripMargin)
    meituan_shop.createOrReplaceTempView("meituan_shop")

    // a.shopType was removed on 04-11.
    val mttg_shop_all = spark.sql(
      """
        |select
        |
        |a.timeStamp,
        |a.platformId,
        |a.platformName,
        |a.goodRatePercentage,
        |a.shopImages,
        |a.opening_hours,
        |a.emotionalKeywords,
        |a.shopCommentCount,
        |a.evaluates,
        |a.shopId,
        |a.shopUrl,
        |a.shopName,
        |a.longitude,
        |a.latitude,
        |a.town,
        |a.aedzId,
        |a.phone,
        |a.shopSellCount,
        |a.address,
        |a.administrative_region,
        |a.city,
        |a.city_grade,
        |a.city_origin,
        |a.district,
        |a.district_origin,
        |a.economic_division,
        |a.if_city,
        |a.if_district,
        |a.if_state_level_new_areas ,
        |a.poor_counties,
        |a.province,
        |a.regional_ID,
        |a.rural_demonstration_counties ,
        |a.rural_ecommerce,
        |a.the_belt_and_road_city ,
        |a.the_belt_and_road_province ,
        |a.the_yangtze_river_economic_zone_city ,
        |a.the_yangtze_river_economic_zone_province,
        |a.urban_agglomerations,
        |
        |b.totalSellCount,
        |b.totalSalesAmount
        |
        |from FEB_5 a
        |left join
        |meituan_shop b
        |on a.shopId = b.shopId
      """.stripMargin)
      .dropDuplicates("shopId")
    mttg_shop_all.repartition(4).write.json(shopPath_json)
    mttg_shop_all.repartition(4).write.orc(shopPath_orc)
    mttg_shop_all
  }

  /**
   * Extracts shop-level records for Dazhong Dianping (大众点评) data.
   *
   * Same pipeline as [[shopTiQu_mttg]] but without `a.shopSellCount` in the
   * projection, and `totalSalesAmount` is cast to double (mttg uses decimal(20,2) —
   * this asymmetry is preserved from the original; confirm whether it is intended).
   *
   * @param frame         raw monthly rows for the platform
   * @param shopPath_json JSON output path
   * @param shopPath_orc  ORC output path
   * @return the de-duplicated shop-level DataFrame that was written
   */
  def shopTiQu_dzdp(spark: SparkSession, frame: DataFrame, shopPath_json: String, shopPath_orc: String): DataFrame = {
    // createOrReplaceTempView replaces the registerTempTable API deprecated in Spark 2.0.
    frame.createOrReplaceTempView("FEB_5")
    val dzdp_shop = spark.sql(
      """
        |select
        |shopId,
        |cast(sum(sellCount) as Long) totalSellCount,
        |cast(sum(salesAmount) as double) totalSalesAmount
        |from
        |FEB_5
        |group by
        |shopId
      """.stripMargin)
    dzdp_shop.createOrReplaceTempView("dzdp_shop")

    val dzdp_shop_all = spark.sql(
      """
        |select
        |
        |a.timeStamp,
        |a.platformId,
        |a.platformName,
        |a.goodRatePercentage,
        |a.shopImages,
        |a.opening_hours,
        |a.emotionalKeywords,
        |a.shopCommentCount,
        |a.evaluates,
        |a.shopId,
        |a.shopUrl,
        |a.shopName,
        |a.longitude,
        |
        |a.latitude,
        |a.town,
        |a.aedzId,
        |a.phone,
        |a.address,
        |a.administrative_region,
        |a.city,
        |a.city_grade,
        |a.city_origin,
        |a.district,
        |a.district_origin,
        |a.economic_division,
        |a.if_city,
        |a.if_district,
        |a.if_state_level_new_areas ,
        |a.poor_counties,
        |a.province,
        |a.regional_ID,
        |a.rural_demonstration_counties ,
        |a.rural_ecommerce,
        |a.the_belt_and_road_city ,
        |a.the_belt_and_road_province ,
        |a.the_yangtze_river_economic_zone_city ,
        |a.the_yangtze_river_economic_zone_province,
        |a.urban_agglomerations,
        |
        |b.totalSellCount,
        |b.totalSalesAmount
        |
        |from FEB_5 a
        |left join
        |dzdp_shop b
        |on a.shopId = b.shopId
      """.stripMargin)
      .dropDuplicates("shopId")
    dzdp_shop_all.repartition(4).write.json(shopPath_json)
    dzdp_shop_all.repartition(4).write.orc(shopPath_orc)

    dzdp_shop_all
  }

}
