package com.o2o.cleaning.month.platform.ebusiness_plat.jumei_2019_7.Jumei_utils

import org.apache.spark.sql.SparkSession

object addNewCate_LYC {

  /**
   * Monthly "new category" extraction for Jumei.
   *
   * Reads last month's category dimension table and the current month's raw
   * source data from OBS (via the s3a connector), anti-joins them on
   * `subCategoryId`, deduplicates, and writes the sub-categories that are new
   * this month to a single local CSV file.
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("jumei_isnosell")
      .getOrCreate()
    val sc = spark.sparkContext

    // SECURITY(review): access/secret keys are hard-coded in source control.
    // Move them to environment configuration or a credentials provider and
    // rotate these keys; left as-is here to preserve runtime behavior.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("WARN")

    // Last month's category dimension table.
    // (createOrReplaceTempView replaces the deprecated registerTempTable.)
    spark.read.json("s3a://o2o-dimension-table/category_table/categoryFile_tao/jumei/")
      .createOrReplaceTempView("cateTable_old")

    // Current month's raw source data, keeping only the three-level
    // category-hierarchy columns needed for the comparison.
    spark.read.json("s3a://o2o-sourcedata/obs-source-2019/6/Jumei/jumei_1906/")
      .select("rootCategoryName", "rootCategoryId", "categoryName", "categoryId", "subCategoryName", "subCategoryId")
      .createOrReplaceTempView("sourceData")

    // Anti-join: keep rows whose subCategoryId has no match in last month's
    // table — i.e. the categories newly added this month.
    spark.sql(
      """
        |select a.*
        |from sourceData a
        |left join
        |cateTable_old b
        |on a.subCategoryId = b.subCategoryId
        |where b.subCategoryId is null
      """.stripMargin)
      .dropDuplicates("subCategoryId")
      .repartition(1) // collapse to one partition so a single CSV file is produced
      .write.csv("E:\\李营超\\盒马\\聚美每月新增分类\\6\\cate_2019_6\\")

    // spark.stop() shuts down the session and its underlying SparkContext.
    spark.stop()
  }
}
