package com.o2o.cleaning.month.platform.ebusiness_plat.rongegou

import com.alibaba.fastjson.{JSON, JSONArray}
import com.mongodb.spark.MongoSpark
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession

import scala.collection.mutable.ListBuffer

/**
 * 更新rsmsh商品创建时间
 */
/**
 * Backfills the goods creation time ("createtime") for rsmsh goods.
 *
 * For every month collection from 2020-10 through 2022-01, reads the
 * MongoDB collection `rsmsh_detail_YYMM`, computes the earliest
 * `crawl_date` found in each document's `add_to_field` array, stores it
 * on the document as `createtime`, and writes (good_id, createtime)
 * pairs as JSON to OBS (via the s3a filesystem).
 */
object UpdataRsmshCreatTime {

  // Month collections to process, keyed by year.
  // Replaces the original `var months = null` + `year.eq(...)` chain:
  // `eq` is reference equality on AnyRef and only happened to work because
  // string literals are interned; an unmatched year would have left
  // `months` null and thrown an NPE at the loop.
  private val monthsByYear: Seq[(String, Seq[String])] = Seq(
    "2020" -> Seq("10", "11", "12"),
    "2021" -> (1 to 12).map(m => f"$m%02d"),
    "2022" -> Seq("01")
  )

  def main(args: Array[String]): Unit = {
    for ((year, months) <- monthsByYear; month <- months) {
      processMonth(year, month)
    }
  }

  /**
   * Reads one month's MongoDB collection, derives each goods' earliest
   * crawl date, and writes the (good_id, createtime) result set to OBS.
   */
  private def processMonth(year: String, month: String): Unit = {
    val database = "Rsmsh"
    val collection = s"rsmsh_detail_${year.substring(2, 4)}$month"
    // NOTE(review): credentials are hard-coded below (Mongo URI and the S3
    // access/secret keys). Move them to configuration / environment
    // variables before this code leaves a trusted environment.
    val spark = SparkSession.builder()
      .appName("read_orc")
      .master("local[*]")
      .config("spark.sql.shuffle.partitions", 200)
      .config("spark.default.parallelism", 200)
      .config("spark.sql.autoBroadcastJoinThreshold", -1)
      // NOTE(review): the space after '@' looks wrong for a MongoDB URI —
      // it is reproduced unchanged here; confirm the driver accepts it
      // before "fixing" it.
      .config("spark.mongodb.input.uri", "mongodb://root:O2Odata123!@ 192.168.0.149:27017/admin")
      .config("spark.mongodb.input.database", database)
      .config("spark.mongodb.input.collection", collection)
      .getOrCreate()
    // Ensure the session is closed even if the month's job fails; the
    // original leaked the session on any exception.
    try {
      val context = spark.sparkContext
      context.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
      context.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
      // Endpoint override needed when running against the local cluster.
      context.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
      context.setLogLevel("ERROR")

      val rdd = MongoSpark.load(context).rdd.map { doc =>
        val nObject = JSON.parseObject(doc.toJson)
        // The earliest crawl_date across the add_to_field entries becomes
        // the goods' creation time. Assumes add_to_field is present and
        // non-empty for every document — TODO confirm against the data.
        val history: JSONArray = nObject.getJSONArray("add_to_field")
        nObject.remove("_id")
        var earliest = history.getJSONObject(0).getLong("crawl_date")
        for (i <- 1 until history.size()) {
          val candidate = history.getJSONObject(i).getLong("crawl_date")
          if (candidate < earliest) earliest = candidate
        }
        nObject.put("createtime", earliest)
        nObject.toJSONString
      }

      val frame = spark.read.json(rdd)
      frame.printSchema()
      frame.selectExpr("good_id", "createtime")
        .coalesce(1)
        .write.json(s"s3a://o2o-dataproces-group/zyf/pullData/rsmsh1/${year}_$month")
    } finally {
      spark.close()
    }
  }
}
