package com.o2o.cleaning.month.platform.ebusiness_plat.ddmc

import com.alibaba.fastjson.{JSON, JSONObject}
import com.o2o.cleaning.month.platform.ebusiness_plat.ddmc.DingDongMaiCai.{addressMatch, addressPath}
import org.apache.spark.sql.{DataFrame, SparkSession}

object DingdongMc_es247_g_data {

  /**
    * Monthly Dingdong Maicai goods/address job.
    *
    * Reads the goods-with-address ORC dump from OBS, joins it against the
    * address reference data via [[DingDongMaiCai.addressMatch]], writes the
    * matched result back to OBS, and upserts every record into the per-month
    * Elasticsearch index keyed by `good_id`.
    *
    * @param args optional overrides: args(0)=year, args(1)=month, args(2)=platform.
    *             Omitted args fall back to the original hard-coded 2020/7/dingdongmc run,
    *             so existing invocations behave exactly as before.
    */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("SparkTest")
      .master("local[*]") // NOTE(review): pins local execution — confirm this is intended for production runs
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      // SECURITY: ES credentials are committed in source; move them to spark-submit
      // conf / a secrets store and rotate the password.
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .config("es.batch.write.retry.count", "10")
      .config("es.batch.write.retry.wait", "60")
      .config("cluster.name", "O2OElastic")
      .getOrCreate()
    val sc = spark.sparkContext

    // SECURITY: OBS access keys are committed in source — rotate and externalize
    // (e.g. core-site.xml or environment-backed credential provider).
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")

    /** *
      * Job parameters (previously hard-coded `var`s that had to be edited by hand).
      * Now immutable and overridable from the command line.
      */
    val year = args.lift(0).getOrElse("2020")
    val month = args.lift(1).getOrElse("7")
    val platform = args.lift(2).getOrElse("dingdongmc")
    val index247 = s"247_${year}_${platform}/${platform}_${year}_${month}"

    // Derive every OBS path from the parameters instead of repeating the
    // "2020/7/dingdongmc" literals, so index247 and the storage paths can
    // never drift apart. Identical strings under the default parameters.
    val basePath = s"s3a://o2o-dataproces-group/zsc/${year}/${month}/${platform}"

    import org.elasticsearch.spark._

//    val values: RDD[String] = sc.esJsonRDD(index247).values
//    spark.read.json(values).write.orc(s"$basePath/good_bu_address")

    // Goods records previously dumped from ES, plus the address reference set.
    val frame: DataFrame = spark.read.orc(s"$basePath/good_bu_address")
    val addressDF: DataFrame = spark.read.json(addressPath)

    // Cached because the matched result is consumed twice: the ORC write and the ES push.
    val addressResultDF: DataFrame = addressMatch(spark, frame, addressDF).cache()

    addressResultDF.write.orc(s"$basePath/good_bu_address_result_new")

    // Upsert into the monthly index; es.mapping.id=good_id makes reruns
    // overwrite existing documents instead of duplicating them.
    addressResultDF.toJSON.rdd
      .map(line => JSON.parseObject(line))
      .saveToEs(index247, Map("es.mapping.id" -> "good_id"))

    /*val res1: Dataset[Row] = spark.read.orc("s3a://dws-data/g_data/2020/9/dingdongmc").dropDuplicates("product_id")

    res1.write.orc("s3a://dws-data/g_data/2020/9/dingdongmc_new")

    val res2: Dataset[Row] = spark.read.orc("s3a://dws-data/g_data/2020/8/dingdongmc").dropDuplicates("product_id")

    res2.write.orc("s3a://dws-data/g_data/2020/8/dingdongmc_new")

    val res3: Dataset[Row] = spark.read.orc("s3a://dws-data/g_data/2020/7/dingdongmc").dropDuplicates("product_id")

    res3.write.orc("s3a://dws-data/g_data/2020/7/dingdongmc_new")*/

//    import org.apache.spark.sql.functions._
//
//    res.agg(count("*"),sum("sellCount"),sum("salesAmount")).show(false)

    // Release the cache and shut the session down cleanly (previously leaked).
    addressResultDF.unpersist()
    spark.stop()
  }

}
