import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.o2o.utils.Iargs
import com.o2o.utils.times.TimesYearAll
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * @author o2o-rd-0008
  * @since  2020/6/5 16:23
  * @note   Joins a locally filtered Taobao category result with the
  *         MongoDB `tmall_category` collection and writes the joined
  *         output back to local disk.
  */
/**
 * One-off check job: loads the pre-filtered category result from local disk,
 * loads the full `tmall_category` collection from MongoDB, left-joins them on
 * `categoryId`, and writes the joined rows back to local disk as JSON.
 */
object CheckTaobaoCateData {
  def main(args: Array[String]): Unit = {

    // Local-mode Spark session wired to a MongoDB input source.
    // NOTE(review): credentials are hard-coded in plaintext here — move them to
    // a config file / secret store before this leaves a developer machine.
    // NOTE(review): the input URI contains a space between '@' and the host
    // ("...!@ 192.168..."); this looks like a typo (or deliberate masking) —
    // confirm the real URI before running. Left byte-identical on purpose.
    val spark = SparkSession.builder()
      .appName(this.getClass.getSimpleName)
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("spark.mongodb.input.uri", "mongodb://root:O2Odata123!@ 192.168.0.149:27017/admin")
      .config("spark.mongodb.input.database", "Tmall")
      .config("spark.mongodb.input.collection", "tmall_category")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // S3A (Huawei OBS) credentials — only needed by the commented-out block below,
    // which reads from s3a:// paths.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    // Job parameters. Changed `var` -> `val`: none of these are ever reassigned.
    // platform name
    val platform = "dingdongmc"
    // current month
    val month = 8
    val lastMonth = 7 // unused in the active path; referenced only by the commented-out block
    // fixed per-month timestamp
    val year = 2020
    val timeStamp = TimesYearAll.TIME202008 // unused in the active path

    // MongoDB source-data path (unused in the active path; kept for the commented-out block)
    val sourcePath = s"s3a://o2o-sourcedata/obs-source-${year}/${month}/${platform}/${platform}_${year}_${month}/"

    // Elasticsearch index used by the commented-out ES round-trip below.
    val index = "2020_dingdongmc_7/type_1"

//    spark.read.json("D:\\taobao\\cate\\tm_cate.json").where("thirdCategoryId='100150199'").select("categoryId").repartition(1).write.json("D:\\taobao\\cate\\result")

    // Pre-filtered category ids produced by the one-liner above.
    val filteredCategories: DataFrame = spark.read.json("D:\\taobao\\cate\\result")

    // Full tmall_category collection from MongoDB (session-level input config above).
    val mongoDocs = MongoSpark.load(sc)

    // Re-serialize each Mongo document through fastjson so spark.read.json
    // sees plain JSON strings (normalizes the extended-JSON produced by toJson()).
    val categoryJson: RDD[String] =
      mongoDocs.map(doc => JSON.parseObject(doc.toJson()).toString)

    val mongoCategories: DataFrame = spark.read.json(categoryJson)

    // Left join keeps every filtered category id, even those missing in Mongo.
    val joined: DataFrame =
      filteredCategories.join(mongoCategories, Seq("categoryId"), "left")

    joined.repartition(1).write.json("D:\\taobao\\cate\\resultfinal")

/*    import org.elasticsearch.spark._

    val data1 = sc.esJsonRDD(index).values

    val value = spark.read.json(data1).toJSON.rdd.map(line => {
      val nObject = JSON.parseObject(line)
      nObject.toString
    })

    val source: DataFrame = spark.read.json(value)
    source.registerTempTable("source")

    val cate: DataFrame = spark.read.json(sourcePath).dropDuplicates("product_id")

    cate.registerTempTable("cate")

    val df = spark.sql(
      """
        |select
        |a.*,
        |b.subCategoryId as subCategoryIdtmp
        |from
        |source a
        |left join
        |cate b
        |on a.product_id=b.product_id
        |where b.product_id is not null
      """.stripMargin).drop("subCategoryId").withColumnRenamed("subCategoryIdtmp","subCategoryId")
      .drop("firstCategoryId").drop("secondCategoryId").drop("thirdCategoryId").drop("fourthCategoryId")


    val cateDF: DataFrame = spark.read.option("header",true).option("delimiter",",").csv(catePath)
    //    spark.read.json()
    val cateResultDF: DataFrame = cateMatch(spark,df,cateDF)
    val res: Dataset[Row] = cateResultDF.dropDuplicates("good_id")
    res.repartition(2).write.orc("s3a://o2o-dataproces-group/zsc/2020/7/dingdongmc/good_modif/")

    res.toJSON.rdd.map(lines=>{
      val nObject = JSON.parseObject(lines)
      nObject
    }).saveToEs(index,Map("es.mapping.id"->"good_id"))*/

    // spark.stop() also stops the underlying SparkContext.
    spark.stop()
  }
}
