package com.o2o.regularservice.plat10099

import java.io.File

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.elasticsearch.spark._

/**
  * @author: gaoyadi
  * @Date: 2018/9/4 15:37
  * @Description:
  * @Modify By:
  */
object CateUtil10099 {
  // Shared SparkSession for the whole job (local mode, ES connector configured).
  // NOTE(review): credentials (ES auth, S3A access keys) are hardcoded here —
  // they should be moved to external configuration / secret storage.
  val spark = SparkSession.builder()
    .appName("SparkTest")
    .master("local[*]")
    .config("es.nodes", "192.168.1.157")
    .config("es.port", "9200")
    .config("cluster.name","O2OElastic")
    .config("es.net.http.auth.user", "elastic")
    .config("es.net.http.auth.pass", "changeme")
    .getOrCreate()

  val sc = spark.sparkContext
  // S3A (Huawei OBS) access for the source/result paths below.
  sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
  sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
  sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
  sc.setLogLevel("ERROR")

  val sqlcontext = spark.sqlContext

  // Run parameters: switch platform/year/month here before each run.
  var platform = "rongyigou"
//  var platform = "miya"
  var year = "2020"
  var month = "5"
  // ES index/type the 10099 records are read from and written back to.
  val index = s"${year}_${platform}/${platform}_${year}_${month}"

  var sourcePath99 = s"s3a://o2o-dataproces-group/zsc/plat10099/${platform}/${year}/${month}/source_10099"  // raw records pulled for the "other" (10099) category
  var needMatchCatePath = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}"    // categories still to be matched; kept on a local path for manual matching
  var modifiedMatchCatePath = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}\\${platform}.csv"  // matched categories (the previous path's output, renamed)
  var newAddCatePath = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}\\gai"                     // JSON copy of the matched categories, to append to the original category table
  var newAddCatePath_kaola_sub = s"D:\\A_平台处理\\10099\\${year}\\${platform}\\${month}\\gai_sub"       // JSON copy of the matched categories (kaola subCategory key), to append to the original category table
  var resultPath = s"s3a://o2o-dataproces-group/zsc/plat10099/${platform}/${year}/${month}/xiugai_10099"   // result: raw records joined with the matched categories

// agricultural-products dimension table path
  var ncpPath = "s3a://o2o-dimension-table/category_table/products_9_3.json"
  val conf = new Configuration()
  conf.set("fs.defaultFS", "hdfs://192.168.1.157:9000");
  conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");

  val fs = FileSystem.get(conf);
  // Checked once at object init: skip re-writing newAddCatePath if it already
  // exists locally (used by the rongyigou/shunfeng handlers).
  private val file2 = new File(newAddCatePath)
  private val newAddCatePath_isexists: Boolean = file2.exists()
//  private val resultPath_isexists: Boolean = fs.exists(new Path(resultPath))
//  private val sourcePath99_isexists: Boolean = fs.exists(new Path(sourcePath99))

  // Entry point: pulls 10099 ("other category") records from ES, runs the
  // handler for the currently selected platform, then writes results back.
  // The commented calls form a two-step manual workflow: first extract the
  // categories for manual matching, then (after matching) relabel and reload.
  def main(args: Array[String]): Unit = {
    val frame = extractFromEs()   // pull firstCategoryId=10099 records from ES
    // Step 1: extract the 10099 data to local disk for manual matching
//    extractEsCate(frame)        // back up the raw data and extract the categories to match

    // Step 2: write the matched data back to the store
//    miya_10099(frame)           // join the matched categories
    rongyigou_10099(frame)

    laidUpEs("192.168.1.157")     // load into ES

  }

  /***
    * Tmall category handling: re-derive the standard category hierarchy from
    * the manually matched CSV, keep a JSON copy of the new mappings, and write
    * the relabelled records to resultPath.
    *
    * Note: the inner join keeps only records whose categoryId was matched.
    * @param frame raw 10099 records pulled from ES
    */
  def tmall_10099(frame : DataFrame):Unit ={
    // Drop the stale category columns; they are re-derived from the matched CSV.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched categoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Expand standId into the 100xx / 100xxxx / 100xxxxxx hierarchy.
    // Deduplicate once and reuse (the original rebuilt dropDuplicates twice).
    val matched = spark.sqlContext.sql(
      """
        |select
        |categoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else 'None' end thirdCategoryId,
        |'None' fourthCategoryId,
        |'1000' products
        |from
        |gai
      """.stripMargin
    ).dropDuplicates("categoryId")

    // JSON copy of the new mappings, to be appended to the category table.
    matched.repartition(1).write.json(newAddCatePath)
    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy; missing levels fall back via IFNULL.
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"None") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"None") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"None") fourthCategoryId,
        |IFNULL(b.products,"None") products
        |from
        |source a
        |join
        |xiugai b
        |on a.categoryId = b.categoryId
      """.stripMargin
    )

    relabelled.repartition(1).write.json(resultPath)
  }

  /***
    * Suning category handling (the 100999999 fallback issue is fixed here):
    * re-derive the standard hierarchy from the matched CSV, left-join it onto
    * the source records, then attach the agricultural-products flag.
    * @param frame raw 10099 records pulled from ES
    */
  def suning_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched subCategoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Step 1: first/second level from the first 2/4 digits of standId.
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |subCategoryId,
        |concat("100",substr(standId,0,2)) as firstCategoryId,
        |concat("100",substr(standId,0,4)) as secondCategoryId,
        |'1000' products
        |from
        |gai
        """.stripMargin
        ).createOrReplaceTempView("gai1")

    // Step 2: third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId,
        |'1000' products
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Step 3: fourth level is always "<third>99".
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |concat(thirdCategoryId,'99') fourthCategoryId,
        |'1000' products
        |from
        |gai2
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    matched.repartition(1).write.json(newAddCatePath)   // JSON copy for the category table
    matched.createOrReplaceTempView("xiugai")

    // Left join keeps unmatched records with the 10099/1009999/... fallbacks.
    // (The original stored the Unit result of registerTempTable in an unused var.)
    spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |source a
        |left join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    ).createOrReplaceTempView("dataAllCate")

    // Attach the agricultural-products flag from the dimension table.
    spark.read.json(ncpPath).createOrReplaceTempView("products")
    val result = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.products,"1000") products
        |from
        |dataAllCate a
        |left join
        |products b
        |on
        |a.thirdCategoryId = b.thirdCategoryId
      """.stripMargin
    ).dropDuplicates("good_id")

    result.repartition(1).write.json(resultPath)
  }

  /***
    * JD category handling: re-derive the standard hierarchy from the matched
    * CSV (keyed on subCategoryId), keep a JSON copy, and write the relabelled
    * records to resultPath.
    *
    * Note: the inner join keeps only records whose subCategoryId was matched.
    * @param frame raw 10099 records pulled from ES
    */
  def jd_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched subCategoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Expand standId into the 100xx / 100xxxx / 100xxxxxx hierarchy.
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else 'None' end thirdCategoryId,
        |'None' fourthCategoryId,
        |'1000' products
        |from
        |gai
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    matched.repartition(1).write.json(newAddCatePath)   // JSON copy for the category table
    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy to every source record.
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"None") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"None") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"None") fourthCategoryId,
        |IFNULL(b.products,"None") products
        |from
        |source a
        |join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    )

    relabelled.repartition(1).write.json(resultPath)
  }


  /***
    * Yunji category handling (the 100999999 fallback issue is fixed here):
    * derive the standard hierarchy in three steps, falling back to the
    * "<parent>99" bucket when standId has fewer digits than the level needs.
    * @param frame raw 10099 records pulled from ES
    */
  def yunji_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched subCategoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Step 1: first/second level from the first 2/4 digits of standId.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |'1000' products,
        |subCategoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId
        |from
        |gai
      """.stripMargin
    ).createOrReplaceTempView("gai1")

    // Step 2: third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |'1000' products,
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Step 3: fourth level is always "<third>99".
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |concat(thirdCategoryId,'99') fourthCategoryId,
        |'1000' products
        |from
        |gai2
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    matched.repartition(1).write.json(newAddCatePath)   // JSON copy for the category table
    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId,
        |IFNULL(b.products,"1000") products
        |from
        |source a
        |join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    )

    relabelled.repartition(1).write.json(resultPath)
  }


  /***
    * Kaola category handling (the 100999999 fallback issue is fixed here).
    *
    * Kaola matches on two keys: rows whose detailCategoryId is a real id
    * (!= -1) are matched on detailCategoryId; the remaining rows
    * (detailCategoryId = -1) are matched on subCategoryId instead. The two
    * relabelled sets are unioned and written to resultPath.
    * @param frame raw 10099 records pulled from ES
    */
  def kaola_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId")
      .drop("secondCategoryId")
      .drop("thirdCategoryId")
      .drop("fourthCategoryId")
      .drop("products")
      .registerTempTable("source")

    // Manually matched mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).registerTempTable("gai")

    // 1. Rows where detailCategoryId != -1: expand standId per detailCategoryId.
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |detailCategoryId,
        |concat("100",substr(standId,0,2)) as firstCategoryId,
        |concat("100",substr(standId,0,4)) as secondCategoryId
        |from
        |gai
        |where detailCategoryId != -1
      """.stripMargin
    ).registerTempTable("gai1")

    // Third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |detailCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).registerTempTable("gai2")

    // Mapping keyed on detailCategoryId; fourth level is always "<third>99".
    var oo = spark.sqlContext.sql(
      """
        |select
        |detailCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |concat(thirdCategoryId,'99') fourthCategoryId
        |from
        |gai2
      """.stripMargin
    )

    // 2. Rows where detailCategoryId = -1: fall back to matching on subCategoryId.
    spark.sqlContext.sql(
      """
        |select
        |standid,
        |subCategoryId,
        |concat("100",substr(standId,0,2)) as firstCategoryId,
        |concat("100",substr(standId,0,4)) as secondCategoryId
        |from
        |gai
        |where detailCategoryId = -1
      """.stripMargin
    ).registerTempTable("gai11")

    spark.sqlContext.sql(
      """
        |select
        |standid,
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai11
      """.stripMargin
    ).registerTempTable("gai22")

    // Mapping keyed on subCategoryId.
    var oo1 = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |concat(thirdCategoryId,'99') fourthCategoryId
        |from
        |gai22
      """.stripMargin
    )

    // JSON copies of the new mappings for the category table, one per key.
    // NOTE(review): these writes filter but do NOT dropDuplicates, unlike the
    // temp views registered below — confirm whether duplicates are acceptable.
    oo.where("detailCategoryId != '-1'").repartition(1).write.json(newAddCatePath)
    oo1.where("subCategoryId != '-1'").repartition(1).write.json(newAddCatePath_kaola_sub)

    oo.dropDuplicates("detailCategoryId").registerTempTable("xiugai")
    oo1.dropDuplicates("subCategoryId").registerTempTable("xiugai1")

    // t1: records matched on detailCategoryId (inner join).
    var t1 = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |source a
        |join
        |xiugai b
        |on
        |a.detailCategoryId = b.detailCategoryId
      """.stripMargin
    )

    // t2: records NOT matched on detailCategoryId (anti-join via left join +
    // null filter), matched on subCategoryId instead; the outer left join keeps
    // them even when unmatched, with the 10099/... fallbacks.
    var t2 = spark.sql(
      """
        |select
        |c.*,
        |IFNULL(d.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(d.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(d.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(d.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |(select
        |a.*
        |from
        |source a
        |left join
        |xiugai b
        |on
        |a.detailCategoryId = b.detailCategoryId
        |where b.detailCategoryId is null) c
        |left join
        |xiugai1 d
        |on
        |c.subCategoryId = d.subCategoryId
      """.stripMargin
    )

//    t1.union(t2).show()
    t1.union(t2).repartition(1).write.json(resultPath)
  }

  /***
    * Miya category handling (the 100999999 fallback issue is fixed here):
    * derive the standard hierarchy in three steps, falling back to the
    * "<parent>99" bucket when standId has fewer digits than the level needs.
    * The fourth level may come from an 8-digit standId.
    * @param frame raw 10099 records pulled from ES
    */
  def miya_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched subCategoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Step 1: first/second level from the first 2/4 digits of standId.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |'1000' products,
        |subCategoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId
        |from
        |gai
      """.stripMargin
    ).createOrReplaceTempView("gai1")

    // Step 2: third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |'1000' products,
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Step 3: fourth level from 8 digits of standId, or "<third>99".
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |case when length(substr(standId,0,8)) >6 then concat("100",substr(standId,0,8)) else concat(thirdCategoryId,'99') end fourthCategoryId,
        |--concat(thirdCategoryId,'99') fourthCategoryId,
        |'1000' products
        |from
        |gai2
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    matched.repartition(1).write.json(newAddCatePath)   // JSON copy for the category table
    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId,
        |IFNULL(b.products,"1000") products
        |from
        |source a
        |join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    )

    relabelled.repartition(1).write.json(resultPath)
  }


  /***
    * Pinduoduo category handling (the 100999999 fallback issue is fixed here):
    * derive the standard hierarchy from the matched CSV, join it onto the
    * source records, then attach the agricultural-products flag and dedupe on
    * good_id before writing.
    * @param frame raw 10099 records pulled from ES
    */
  def pinduoduo_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched mapping (comma-delimited CSV with header).
    spark.read.option("header", "true").option("delimiter",",").csv(modifiedMatchCatePath)
      .createOrReplaceTempView("gai")

    // Step 1: first/second level from the first 2/4 digits of standId.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |subCategoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId
        |from
        |gai
      """.stripMargin
    ).createOrReplaceTempView("gai1")

    // Step 2: third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Step 3: fourth level from 8 digits of standId, or "<third>99".
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |case when length(substr(standId,0,8)) >6 then concat("100",substr(standId,0,8)) else concat(thirdCategoryId,'99') end fourthCategoryId
        |--concat(thirdCategoryId,'99') fourthCategoryId
        |from
        |gai2
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    // JSON copy of the new mappings to append to the category table.
    matched.repartition(1).write.json(newAddCatePath)
    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    // (The original stored the Unit result of registerTempTable in an unused var.)
    spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |source a
        |join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    ).createOrReplaceTempView("dataAllCate")

    // Attach the agricultural-products flag from the dimension table.
    spark.read.json(ncpPath).createOrReplaceTempView("products")
    val result = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.products,"1000") products
        |from
        |dataAllCate a
        |left join
        |products b
        |on
        |a.thirdCategoryId = b.thirdCategoryId
      """.stripMargin
    ).dropDuplicates("good_id")

    result.repartition(1).write.json(resultPath)
  }


  /***
    * Babytree category handling (the 100999999 fallback issue is fixed here):
    * derive the standard hierarchy from the matched CSV (keyed on categoryId)
    * and write the relabelled records to resultPath.
    * @param frame raw 10099 records pulled from ES
    */
  def babytree_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched categoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Step 1: first/second level from the first 2/4 digits of standId.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |categoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId
        |from
        |gai
      """.stripMargin
    ).createOrReplaceTempView("gai1")

    // Step 2: third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |categoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Step 3: fourth level is always "<third>99".
    val matched = spark.sqlContext.sql(
      """
        |select
        |categoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |concat(thirdCategoryId,'99') fourthCategoryId
        |from
        |gai2
      """.stripMargin
    ).dropDuplicates("categoryId")   // dedupe once and reuse (was built twice)

    matched.repartition(1).write.json(newAddCatePath)   // JSON copy for the category table
    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |source a
        |join
        |xiugai b
        |on a.categoryId = b.categoryId
      """.stripMargin
    )

    // relabelled.show()  // debug leftover disabled (siblings keep it commented);
    // it forced an extra full evaluation of the DataFrame before the write.
    relabelled.repartition(1).write.json(resultPath)
  }

  /***
    * Taobao category handling: derive the hierarchy from the matched CSV
    * (keyed on categoryId) and write the relabelled records to resultPath.
    *
    * Note: unlike most other handlers, no JSON copy is written to
    * newAddCatePath here — behavior preserved from the original.
    * @param frame raw 10099 records pulled from ES
    */
  def taobao_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched categoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Expand standId into the 100xx / 100xxxx / 100xxxxxx hierarchy.
    val matched = spark.sqlContext.sql(
      """
        |select
        |categoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else 'None' end thirdCategoryId,
        |'None' fourthCategoryId,
        |'1000' products
        |from
        |gai
      """.stripMargin
    ).dropDuplicates("categoryId")

    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"None") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"None") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"None") fourthCategoryId,
        |IFNULL(b.products,"None") products
        |from
        |source a
        |join
        |xiugai b
        |on a.categoryId = b.categoryId
      """.stripMargin
    )

    relabelled.repartition(1).write.json(resultPath)
  }

  /***
    * Rongyigou category handling: derive the standard hierarchy in three
    * steps from the matched CSV (keyed on subCategoryId), save a JSON copy on
    * the first run, and write the relabelled records to resultPath.
    * @param frame raw 10099 records pulled from ES
    */
  def rongyigou_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")

    // Manually matched subCategoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Step 1: first/second level from the first 2/4 digits of standId.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |subCategoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,  --220601
        |concat("100",substr(standId,0,4))  secondCategoryId
        |from
        |gai
      """.stripMargin
    ).createOrReplaceTempView("gai1")

    // Step 2: third level, or "<second>99" when standId is too short.
    spark.sqlContext.sql(
      """
        |select
        |standId,
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else concat(secondCategoryId,'99') end thirdCategoryId
        |from
        |gai1
      """.stripMargin
    ).createOrReplaceTempView("gai2")

    // Step 3: fourth level from 8 digits of standId, or "<third>99".
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |firstCategoryId,
        |secondCategoryId,
        |thirdCategoryId,
        |case when length(substr(standId,0,8)) >6 then concat("100",substr(standId,0,8)) else concat(thirdCategoryId,'99') end fourthCategoryId
        |--concat(thirdCategoryId,'99') fourthCategoryId
        |from
        |gai2
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    // Only write the JSON copy the first time (path existence checked at object init).
    if(!newAddCatePath_isexists) {
      matched.repartition(1).write.json(newAddCatePath)
    }

    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"1009999") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"100999999") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"10099999999") fourthCategoryId
        |from
        |source a
        |join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    )

    relabelled.repartition(1).write.json(resultPath)
  }

  /***
    * SF Express (shunfeng) category handling: derive the hierarchy from the
    * matched CSV (keyed on subCategoryId), save a JSON copy on the first run,
    * and write the relabelled records to resultPath.
    * @param frame raw 10099 records pulled from ES
    */
  def shunfeng_10099(frame : DataFrame):Unit = {
    // Drop the stale category columns; they are re-derived below.
    frame
      .drop("firstCategoryId", "secondCategoryId", "thirdCategoryId", "fourthCategoryId", "products")
      .createOrReplaceTempView("source")
    // frame.show()  // debug leftover disabled for consistency with the other
    // handlers; it forced an extra full evaluation of the input DataFrame.

    // Manually matched subCategoryId -> standId mapping (CSV with header).
    spark.read.option("header","true").csv(modifiedMatchCatePath).createOrReplaceTempView("gai")

    // Expand standId into the 100xx / 100xxxx / 100xxxxxx hierarchy.
    val matched = spark.sqlContext.sql(
      """
        |select
        |subCategoryId,
        |concat("100",substr(standId,0,2))  firstCategoryId,
        |concat("100",substr(standId,0,4))  secondCategoryId,
        |case when length(substr(standId,0,6)) >4 then concat("100",substr(standId,0,6)) else 'None' end thirdCategoryId,
        |'None' fourthCategoryId,
        |'1000' products
        |from
        |gai
      """.stripMargin
    ).dropDuplicates("subCategoryId")   // dedupe once and reuse (was built twice)

    // Only write the JSON copy the first time (path existence checked at object init).
    if(!newAddCatePath_isexists){
      matched.repartition(1).write.json(newAddCatePath)
    }

    matched.createOrReplaceTempView("xiugai")

    // Attach the matched hierarchy (inner join keeps matched records only).
    val relabelled = spark.sql(
      """
        |select
        |a.*,
        |IFNULL(b.firstCategoryId,"10099") firstCategoryId,
        |IFNULL(b.secondCategoryId,"None") secondCategoryId,
        |IFNULL(b.thirdCategoryId,"None") thirdCategoryId,
        |IFNULL(b.fourthCategoryId,"None") fourthCategoryId,
        |IFNULL(b.products,"None") products
        |from
        |source a
        |join
        |xiugai b
        |on a.subCategoryId = b.subCategoryId
      """.stripMargin
    )

    // The resultPath existence guard was already disabled in the original; kept disabled.
    relabelled.repartition(1).write.json(resultPath)
  }

  /***
    * Load the relabelled records (read back from resultPath) into ES, keyed
    * on good_id.
    *
    * Also re-buckets Want Want (旺旺/旺仔) branded goods out of liquid milk
    * (100210501) and yoghurt (100210502) into "other dairy" (100210599).
    *
    * NOTE(review): a pinduoduo-specific normalization block (commentCount /
    * sellCount / evaluates cleanup, field removals) was commented out here;
    * see VCS history if it needs to be restored.
    * @param esNode ES node address to write to
    */
  def laidUpEs(esNode : String): Unit = {
    spark.sparkContext.textFile(resultPath).map(line => {
      val record = JSON.parseObject(line)

      // getOrDefault avoids an NPE on records missing the field (the original
      // used a bare get(...).toString, which throws when the key is absent).
      val thirdCategoryId = record.getOrDefault("thirdCategoryId", "").toString
      if (thirdCategoryId.equals("100210501") || thirdCategoryId.equals("100210502")) {
        val title = record.getOrDefault("title", "-1").toString
        if (title.contains("旺旺") || title.contains("旺仔")) {
          record.put("thirdCategoryId", "100210599")
          record.put("fourthCategoryId", "10021059999")
        }
      }
      record
    })
      .saveToEs(index, Map("es.mapping.id" -> "good_id", "es.nodes" -> esNode, "es.port" -> "9200", "cluster.name" -> "Es-OTO-Data"))
  }

  /***
    * Extract the distinct category paths that still need manual matching,
    * after backing up the raw pull to sourcePath99.
    *
    * The column set identifying a category path differs per platform; the
    * duplicated if/else branches of the original are collapsed into one match.
    * @param frame raw 10099 records pulled from ES
    */
  def extractEsCate(frame : DataFrame): Unit = {
    // Back up the raw pull first.
    frame.repartition(1).write.json(sourcePath99)

    // Columns that describe one category path on this platform.
    val cateColumns: Option[Seq[String]] = platform match {
      case "tmall" | "taobao" =>
        Some(Seq("rootCategoryId", "categoryId"))
      case "babytree" =>
        Some(Seq("rootCategoryId", "rootCategoryName", "categoryId", "categoryName"))
      case "miya" =>
        Some(Seq("rootCategoryId", "rootCategoryName", "categoryName", "subCategoryId", "subCategoryName"))
      case "kaola" =>
        Some(Seq("rootCategoryId", "rootCategoryName", "categoryId", "categoryName", "subCategoryId", "subCategoryName", "detailCategoryId", "detailCategoryName"))
      case "suning" | "guomei" | "shunfeng" | "jd" | "yunji" | "pinduoduo" | "rongyigou" =>
        Some(Seq("rootCategoryId", "rootCategoryName", "categoryId", "categoryName", "subCategoryId", "subCategoryName"))
      case _ =>
        None   // unknown platform: only the raw backup is written, as before
    }

    // Write the distinct category paths as a headered CSV for manual matching.
    cateColumns.foreach { cols =>
      frame.select(cols.head, cols.tail: _*)
        .dropDuplicates()
        .repartition(1)
        .write.option("header", "true")
        .csv(needMatchCatePath)
    }
  }

  /***
    * 1. Pull the "other category" (firstCategoryId = 10099) records from ES.
    * Reads from the index/type in `index` (${year}_${platform}/...).
    * @return DataFrame parsed from the matching ES documents
    */
  def extractFromEs() : DataFrame= {
    // Term query: only documents whose firstCategoryId is exactly "10099".
    val rawJson = sc.esJsonRDD(index,
      """
        |{
        |  "query": {
        |    "term": {
        |      "firstCategoryId": {
        |        "value": "10099"
        |      }
        |    }
        |  }
        |}
        |
      """.stripMargin).values

    // Round-trip each document through fastjson to normalize the JSON before
    // Spark infers a schema from it.
    val normalized = rawJson.map(doc => JSON.parseObject(doc).toString)
    spark.read.json(normalized)
  }
}
