import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

/**
 * Entry point: running this job extracts each shop's primary industry.
 * (只需要执行本代码即可提取相关的主营行业)
 *
 * 主营行业 (primary industry):
 *  For each shop ID, the standard category with the highest sales amount
 *  within each platform is taken as that shop's primary industry.
 *    --- 京东 苏宁 天猫 淘宝 拼多多 (JD / Suning / Tmall / Taobao / Pinduoduo): 3rd-level standard ID
 *    --- 美团 饿了么 美团团购 大众点评 (Meituan / Ele.me / Meituan-Tuangou / Dianping): 2nd-level standard ID
 */
object Primary_Execute {

  /**
   * Job entry point.
   *
   * Optional arguments: args(0) = year, args(1) = month.  When absent, the
   * previous hard-coded period (2020, 5) is used so existing spark-submit
   * invocations keep working unchanged.
   */
  def main(args: Array[String]): Unit = {
    // Master is intentionally not set here; it is supplied by the cluster.
    val spark = SparkSession.builder()
      //            .master("local[*]")
      .appName("AcountAndCount")
      .config("es.port", "9200")
      // SECURITY: credentials are hard-coded here and in the OBS keys below.
      // They should be moved to a secrets store / job configuration.
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .config("cluster.name","O2OElastic")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .getOrCreate()

    // OBS (S3A) connection settings; sc is never reassigned, so use val.
    val sc: SparkContext = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
//    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("ERROR")

    // Period to process: taken from the CLI when provided, else the defaults.
    val year  = if (args.length >= 1) args(0).toInt else 2020
    val month = if (args.length >= 2) args(1).toInt else 5

    Primary_all(spark, year, month)      // primary industry, all platforms / all regions
    Primary_keqiao(spark, year, month)   // primary industry, Keqiao district
    Primary_yuxi(spark, year, month)     // primary industry, Yuxi tea business
    Primary_zoucheng(spark, year, month) // primary industry, Zoucheng city
  }

  /**
   * Primary industry for every shop across all platforms and regions.
   *
   * Food-service platforms (dazhongdp / elm / meituan / meituan_tg) keep the
   * 2nd-level standard category id; e-commerce platforms (taobao / tmall /
   * jd / suning / pinduoduo) keep the 3rd-level id.  For each
   * (shopId, platformId) pair the category with the highest summed sales is
   * selected (row_number = 1).  New rows are merged append-only into
   * "primary_shop_cate_all": existing (shopid, platformid) pairs are never
   * updated ("只增不更新").
   */
  def Primary_all(spark: SparkSession, year: Int, month: Int): Unit = {
    val platArr = Array("dazhongdp","elm","meituan_tg","meituan","taobao","tmall","jd","suning","pinduoduo")
    // Food-service platforms use the 2nd-level category id.
    val foodPlats = Set("dazhongdp", "elm", "meituan", "meituan_tg")
    // BUG FIX: the original else-if tested plat.equals("tmall") twice and never
    // tested "pinduoduo", so pinduoduo shops were silently skipped even though
    // the platform list (and the file header) include it.
    val ecomPlats = Set("taobao", "tmall", "jd", "suning", "pinduoduo")

    for (plat <- platArr) {
      if (foodPlats.contains(plat)) {
        // 1. Food-service industry: extract the 2nd-level category id.
        // NOTE: the original had a dedicated "meituan" branch reading the exact
        // same path and columns as the generic one — the duplication is removed.
        spark.read.orc(s"s3a://dws-data/g_data/${year}/${month}/${plat}/")
          .selectExpr("shopId","firstCategoryId","secondCategoryId","salesAmount","platformId")
          .createOrReplaceTempView("t")

        // Per (shopId, platformId), keep the 2nd-level category with the
        // highest total sales, then save it as CSV.
        spark.sql(
          """
            |select * from
            |(select *,row_number() over(partition by shopId,platformid order by sum_sale desc) rank
            |from
            |(
            |select
            |platformId as platformid ,shopId,firstCategoryId,secondCategoryId,sum(cast(salesAmount as double)) sum_sale from t
            |group by shopId,firstCategoryId,secondCategoryId,platformId
            |))
            |where rank = 1
          """.stripMargin)
          .withColumnRenamed("secondCategoryId","shop_cate")
          .select("shopId","shop_cate","platformid")
          .repartition(1)
          .write.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增主营行业/${year}_${month}/${plat}")
      }
      else if (ecomPlats.contains(plat)) {
        // 5. E-commerce industry: extract the 3rd-level category id.
        spark.read.orc(s"s3a://dws-data/g_data/${year}/${month}/${plat}/")
          .selectExpr("shopId","firstCategoryId","secondCategoryId","thirdCategoryId","salesAmount","platformId")
          .createOrReplaceTempView("t")

        // A missing 3rd-level id ('None') is backfilled as <secondCategoryId>99.
        spark.sql(
          """
            |select *  from
            |(select *,row_number() over(partition by shopId,platformid order by sum_sale desc) rank
            |from
            |(
            |select
            |platformId as platformid,shopId,firstCategoryId,secondCategoryId,
            |case when thirdCategoryId = 'None' then concat(secondCategoryId,'99') else thirdCategoryId end thirdCategoryId,
            |sum(cast(salesAmount as double)) sum_sale from t
            |group by shopId,firstCategoryId,secondCategoryId,thirdCategoryId,platformId
            |))
            |where rank = 1
          """.stripMargin)
          .withColumnRenamed("thirdCategoryId","shop_cate")
          .select("shopId","shop_cate","platformid")
          .repartition(1)
          .write.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增主营行业/${year}_${month}/${plat}")
      }
    }

    // Append-only merge: join this month's shops against the historical table
    // and keep only (shopid, platformid) pairs not yet present ("只增不更新").
    val df = spark.read.orc("s3a://o2o-dataproces-group/chen_lixiu/primary_shop_cate_all/*/*")
    df.createOrReplaceTempView("t_8")
    val df2 = spark.read.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增主营行业/${year}_${month}/*/*")
    df2.createOrReplaceTempView("t_9")

    // Debug aid kept from the original: surfaces new rows with a null category.
    spark.sql(
      """
        |select * from t_9 where shop_cate is null
        |""".stripMargin).show()

    spark.sql(
      """
        |select
        |a.shopId shopid,
        |case when a.shop_cate = 'None' then '100999999' else a.shop_cate end shop_cate,
        |a.platformid
        |from t_9 a left join t_8 b
        |on a.shopId = b.shopid and a.platformid = b.platformid
        |where b.shopid is null
        |""".stripMargin)
      .select("shopid","shop_cate","platformid")
      .repartition(1).write.orc(s"s3a://o2o-dataproces-group/chen_lixiu/primary_shop_cate_all/${year}_${month}/")
  }
  /**
   * Primary industry for shops in Keqiao district (柯桥区).
   *
   * Only taobao / tmall / jd are processed.  The 1st-level standard category
   * with the highest total sales becomes each shop's primary industry; results
   * are merged append-only into "primary_shop_keqiao" (existing pairs win).
   */
  def Primary_keqiao(spark:SparkSession,year:Int,month:Int): Unit ={
    Array("taobao", "tmall", "jd").foreach { platform =>
      // Restrict the month's data to Keqiao district and expose it as "t".
      spark.read.orc(s"s3a://dws-data/g_data/${year}/${month}/${platform}/")
        .where("district = '柯桥区'")
        .selectExpr("shopId","firstCategoryId","salesAmount","platformId")
        .registerTempTable("t")

      // Highest-selling 1st-level category per (shop, platform); a literal
      // 'None' category id is replaced by the placeholder code '10099'.
      val winners = spark.sql(
        """
          |select *  from
          |(select *,row_number() over(partition by shopId,platformid order by sum_sale desc) rank
          |from
          |(
          |select
          |platformId as platformid,shopId,
          |case when firstCategoryId = 'None' then '10099' else firstCategoryId end firstCategoryId,
          |sum(cast(salesAmount as double)) sum_sale from t
          |group by shopId,firstCategoryId,platformId
          |))
          |where rank = 1
          """.stripMargin)

      winners
        .withColumnRenamed("firstCategoryId","shop_cate")
        .select("shopId","shop_cate","platformid")
        .repartition(1)
        .write.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增柯桥区一级主营行业/${year}_${month}/${platform}")
    }

    // Append-only merge with history: keep only brand-new (shop, platform) pairs.
    spark.read.orc("s3a://o2o-dataproces-group/chen_lixiu/jd_cloud/primary_shop_keqiao/*/*")
      .registerTempTable("t_8")
    spark.read.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增柯桥区一级主营行业/${year}_${month}/*/*")
      .registerTempTable("t_9")

    val freshRows = spark.sql(
      """
        |select
        |a.shopId  shopid,
        |a.shop_cate,
        |a.platformid
        |from t_9 a left join t_8 b
        |on a.shopId = b.shopid and a.platformid = b.platformid
        |where b.shopid is null
        |""".stripMargin)

    freshRows
      .select("shopid","shop_cate","platformid")
      .repartition(1)
      .write.orc(s"s3a://o2o-dataproces-group/chen_lixiu/jd_cloud/primary_shop_keqiao/${year}_${month}/")
  }
  /**
   * Primary industry for the Yuxi tea business (secondCategoryId = '1002108').
   *
   * Covers taobao / tmall / jd / suning.  The 3rd-level standard category with
   * the highest total sales wins; a 'None' 3rd-level id is backfilled as
   * <secondCategoryId>99.  Results are merged append-only into
   * "primary_shop_tea" (existing (shop, platform) pairs are kept as-is).
   */
  def Primary_yuxi(spark:SparkSession,year:Int,month:Int): Unit ={
    Array("taobao", "tmall", "jd", "suning").foreach { platform =>
      // Keep only tea-category rows and expose them as temp table "t".
      spark.read.orc(s"s3a://dws-data/g_data/${year}/${month}/${platform}/")
        .where("secondCategoryId = '1002108'")
        .selectExpr("shopId","firstCategoryId","secondCategoryId","thirdCategoryId","salesAmount","platformId")
        .registerTempTable("t")

      // Top 3rd-level category per (shop, platform) by summed sales.
      val winners = spark.sql(
        """
          |select *  from
          |(select *,row_number() over(partition by shopId,platformid order by sum_sale desc) rank
          |from
          |(
          |select
          |platformId as platformid,shopId,firstCategoryId,secondCategoryId,
          |case when thirdCategoryId = 'None' then concat(secondCategoryId,'99') else thirdCategoryId end thirdCategoryId,
          |sum(cast(salesAmount as double)) sum_sale from t
          |group by shopId,firstCategoryId,secondCategoryId,thirdCategoryId,platformId
          |))
          |where rank = 1
          """.stripMargin)

      winners
        .withColumnRenamed("thirdCategoryId","shop_cate")
        .select("shopId","shop_cate","platformid")
        .repartition(1)
        .write.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增茶行业主营行业/${year}_${month}/${platform}")
    }

    // Append-only merge with the historical tea table ("只增不更新").
    spark.read.orc("s3a://o2o-dataproces-group/chen_lixiu/jd_cloud/primary_shop_tea/*/*")
      .registerTempTable("t_8")
    spark.read.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增茶行业主营行业/${year}_${month}/*/*")
      .registerTempTable("t_9")

    // Debug aid kept from the original: surfaces new rows with a null category.
    spark.sql(
      """
        |select * from t_9 where shop_cate is null
        |""".stripMargin).show()

    val freshRows = spark.sql(
      """
        |select
        |a.shopId shopid,
        |a.shop_cate,
        |a.platformid
        |from t_9 a left join t_8 b
        |on a.shopId = b.shopid and a.platformid = b.platformid
        |where b.shopid is null
        |""".stripMargin)

    freshRows
      .select("shopid","shop_cate","platformid")
      .repartition(1)
      .write.orc(s"s3a://o2o-dataproces-group/chen_lixiu/jd_cloud/primary_shop_tea/${year}_${month}/")
  }
  /**
   * Primary industry for Zoucheng city's edible-fungi trade
   * (firstCategoryId = '10021').
   *
   * Covers taobao / tmall / jd / suning.  The 3rd-level standard category with
   * the highest total sales wins; a 'None' 3rd-level id is backfilled as
   * <secondCategoryId>99.  Results are merged append-only into
   * "primary_shop_fungi" (existing (shop, platform) pairs are kept as-is).
   */
  def Primary_zoucheng(spark:SparkSession,year:Int,month:Int): Unit ={
    Array("taobao", "tmall", "jd", "suning").foreach { platform =>
      // Keep only edible-fungi rows and expose them as temp table "t".
      spark.read.orc(s"s3a://dws-data/g_data/${year}/${month}/${platform}/")
        .where("firstCategoryId = '10021'")
        .selectExpr("shopId","firstCategoryId","secondCategoryId","thirdCategoryId","salesAmount","platformId")
        .registerTempTable("t")

      // Top 3rd-level category per (shop, platform) by summed sales.
      val winners = spark.sql(
        """
          |select *  from
          |(select *,row_number() over(partition by shopId,platformid order by sum_sale desc) rank
          |from
          |(
          |select
          |platformId as platformid,shopId,firstCategoryId,secondCategoryId,
          |case when thirdCategoryId = 'None' then concat(secondCategoryId,'99') else thirdCategoryId end thirdCategoryId,
          |sum(cast(salesAmount as double)) sum_sale from t
          |group by shopId,firstCategoryId,secondCategoryId,thirdCategoryId,platformId
          |))
          |where rank = 1
          """.stripMargin)

      winners
        .withColumnRenamed("thirdCategoryId","shop_cate")
        .select("shopId","shop_cate","platformid")
        .repartition(1)
        .write.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增食用菌行业主营行业/${year}_${month}/${platform}")
    }

    // Append-only merge with the historical fungi table ("只增不更新").
    spark.read.orc("s3a://o2o-dataproces-group/chen_lixiu/jd_cloud/primary_shop_fungi/*/*")
      .registerTempTable("t_8")
    spark.read.option("header","true").csv(s"s3a://o2o-dataproces-group/chen_lixiu/新增食用菌行业主营行业/${year}_${month}/*/*")
      .registerTempTable("t_9")

    val freshRows = spark.sql(
      """
        |select
        |a.shopId shopid,
        |a.shop_cate,
        |a.platformid
        |from t_9 a left join t_8 b
        |on a.shopId = b.shopid and a.platformid = b.platformid
        |where b.shopid is null
        |""".stripMargin)

    freshRows
      .select("shopid","shop_cate","platformid")
      .repartition(1)
      .write.orc(s"s3a://o2o-dataproces-group/chen_lixiu/jd_cloud/primary_shop_fungi/${year}_${month}/")
  }




}
