package com.o2o.cleaning.month.platform.ebusiness_plat.ddmc

import com.alibaba.fastjson.{JSON, JSONObject}
import com.mongodb.spark.MongoSpark
import com.mongodb.spark.config.ReadConfig
import com.mongodb.spark.rdd.MongoRDD
import com.o2o.utils.Iargs
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.bson.Document


/**
  * Pulls Dingdong Maicai (ddmc) raw data from MongoDB and stores it on OBS.
  *
  * @ Author: o2o-rd-0008
  * @ Date:   2018/11/2 17:13
  * @ Description: monthly data-pull job for the ddmc e-commerce platform
  */
object MongoDBData {

  def main(args: Array[String]): Unit = {

    // Build a local Spark session.
    // NOTE(review): master("local[*]") is hard-coded; remove or make it
    // configurable before submitting this job to a cluster.
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // S3A (OBS) credentials so Spark can read/write object storage paths below.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")


    /** ******************* IMPORTANT: job parameters ****************************************************/
    val year = Iargs.YEAR
    val month = Iargs.MONTH
    val timeStamp = Iargs.TIMESAMP
    val obs = "s3a://"
    val platform = "dingdongmc"
    // SECURITY NOTE(review): credentials are hard-coded in the connection string;
    // move them to configuration / secret storage. The original string contained
    // a stray space after '@' ("…!@ 192.168…"), which makes the URI unparseable —
    // removed here. The '!' in the password should ideally be percent-encoded.
    val readUri = "mongodb://root:O2Odata123!@192.168.0.149:27017/admin"

    val readDatabase = "DingDongMaiCai"
    //    val readCollection = "ddmc_detail_2107"  // Mongo collection name used before the API change
    // Collection name pattern: ddmc_detail_v1_<yy><mm>.
    // The original hard-coded the "21" year prefix and mis-built the name when
    // `month` arrived already zero-padded (e.g. "07" -> "...21007"); derive both
    // parts from the job parameters instead. Identical output for the original
    // inputs (year "2021", unpadded month).
    val readCollection = f"ddmc_detail_v1_${year.takeRight(2)}${month.toInt}%02d"
    // Existing category dimension table.
    val cate_path = s"s3a://o2o-dimension-table/category_table/jd/jd_sub_v26/*"
    // Output path for categories newly seen in this run.
    val cate_newAdd_path = obs + s"o2o-dimension-table/category_table/dingdongmc_cate/dingdongmc_cate_newAdd/dingdongmc_cate_${year}_${month.toInt}"
    // Output path for the pulled raw data.
    val resultUrl = s"s3a://o2o-sourcedata-${year}/obs-source-${year}/${month}/${platform}/${platform}_${year}_${month}"


    // Read the raw MongoDB documents as JSON strings (with `_id` stripped).
    val dataRDD: RDD[String] = loadMongoData(spark, readUri, readDatabase, readCollection, timeStamp)

    //    val dataFrame: DataFrame = spark.read.json(dataRDD)
    //    val dataFrame: DataFrame =  spark.read.json(sourcePath)
    //
    //    val cateAllDS: Dataset[Row] = dataFrame.selectExpr("rootCategoryId", "rootCategoryName", "categoryId", "categoryName").dropDuplicates()
    //    cateAllDS.repartition(1).write.option("header", true).csv(cate_path)


    // Extract newly-added categories (kept for reference, currently disabled):
    //    val new_cate: DataFrame = dataFrame.selectExpr("subCategoryId").except(spark.read.json(cate_path).selectExpr("subCategoryId")).dropDuplicates()
    //      .join(dataFrame.selectExpr("rootCategoryId", "rootCategoryName", "categoryId", "categoryName", "subCategoryId", "subCategoryName"), Seq("subCategoryId"), "left").dropDuplicates()
    //    println("count::::"+new_cate.count())
    //    if (new_cate.collect().length > 0) new_cate.repartition(1).write.json(cate_newAdd_path)

    //    dataRDD.repartition(1).saveAsTextFile(resultUrl)
    // Infer a schema from the JSON strings and persist a single ORC file.
    spark.read.json(dataRDD).repartition(1).write.orc(resultUrl)

  }


  /**
    * Loads a MongoDB collection as an RDD of JSON strings, stripping the
    * Mongo-internal `_id` field from each document so downstream JSON schema
    * inference is not polluted by the ObjectId wrapper.
    *
    * @param spark          active SparkSession (its SparkContext drives the Mongo connector)
    * @param readUri        MongoDB connection URI
    * @param readDatabase   database name to read from
    * @param readCollection collection name to read from
    * @param timeStamp      currently unused — kept for interface compatibility
    * @return RDD of JSON strings, one per document, without `_id`
    */
  def loadMongoData(spark: SparkSession, readUri: String, readDatabase: String, readCollection: String, timeStamp: String): RDD[String] = {

    val readConfig = ReadConfig(Map("uri" -> readUri, "database" -> readDatabase, "collection" -> readCollection))

    val mongoRDD: MongoRDD[Document] = MongoSpark.load(spark.sparkContext, readConfig)

    // Re-parse each BSON document's JSON form and drop `_id` before returning.
    mongoRDD.map { doc =>
      val json: JSONObject = JSON.parseObject(doc.toJson())
      json.remove("_id")
      json.toString
    }
  }


}
