import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{DataFrame, SQLContext, SparkSession}
import org.elasticsearch.spark._

/**
  * @ Author: o2o-rd-0008
  * @ Date:   2020/8/4 11:27
  * @ Description: Reads JD product documents for one year from Elasticsearch,
  *                flattens fields out of the nested Base_Info JSON string,
  *                and writes the result as JSON to OBS via the s3a connector.
  */
object JdEsToObs extends Serializable {

  /**
    * Reads JD product documents for one year from Elasticsearch (index
    * "&lt;year&gt;_jd"), keeps only documents whose `subCategoryName` is
    * "显示器" (monitor), lifts the "分辨率" (resolution) and "尺寸" (size)
    * fields out of the nested `Base_Info` JSON string into top-level fields
    * `fenbianlv` and `chicun`, and writes the result as a single JSON file
    * to OBS through the s3a connector.
    *
    * @param args optional; args(0) may be the index year (defaults to 2021,
    *             preserving the previous hard-coded behavior)
    */
  def main(args: Array[String]): Unit = {
    val spark: SparkSession = SparkSession.builder()
      .appName("SparkTest")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      // SECURITY NOTE(review): Elasticsearch endpoint and credentials are
      // hard-coded; move them to configuration / environment variables before
      // committing this to production.
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // SECURITY NOTE(review): OBS access keys are hard-coded in source; prefer
    // a credentials provider or environment variables so secrets are not
    // committed to version control.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("WARN")

    // Allow the index year to be supplied as the first CLI argument; fall back
    // to 2021 so existing invocations behave exactly as before.
    val year: Int = args.headOption
      .flatMap(a => scala.util.Try(a.toInt).toOption)
      .getOrElse(2021)
    val index = s"${year}_jd"

    // Fetch only monitor ("显示器") documents from the per-year JD index.
    // esJsonRDD yields (docId, jsonString) pairs; we keep the JSON values.
    val data = sc.esJsonRDD(index,
      """
        |{
        |  "query": {
        |    "match": {
        |      "subCategoryName.keyword": "显示器"
        |    }
        |  }
        |}
      """.stripMargin).values

    val flattened = spark.read.json(data).toJSON.rdd.map { line =>
      val doc = JSON.parseObject(line)

      // Base_Info is itself a JSON string; parse it to lift out the fields we
      // need. Guard against records without Base_Info — fastjson's
      // JSON.parseObject(null) returns null, which previously caused a
      // NullPointerException on getString.
      val baseInfo: JSONObject = Option(doc.getString("Base_Info"))
        .map(JSON.parseObject)
        .getOrElse(new JSONObject())

      doc.put("fenbianlv", baseInfo.getString("分辨率")) // resolution
      doc.put("chicun", baseInfo.getString("尺寸"))      // physical size

      doc.toString
    }

    // repartition(1) coalesces the output into a single JSON part file for
    // the downstream consumer.
    spark.read.json(flattened).repartition(1).write.json("s3a://o2o-dataproces-group/zyf/jd")

    sc.stop()
  }
}
