package com.o2o.cleaning.month.platform.ebusiness_plat.alibaba

import com.alibaba.fastjson.JSON
import com.o2o.utils.Iargs
import org.apache.spark.sql.{DataFrame, SparkSession}

object Alibaba {

  /**
   * Monthly cleaning job for the Alibaba e-commerce platform.
   *
   * Pipeline: read the raw monthly JSON detail data from OBS (S3A) ->
   * deduplicate by `good_id` -> derive `sellCount` / `salesAmount` /
   * `priceText` -> left-join the shop address dimension table on `shopId`
   * -> bulk-load the result into Elasticsearch.
   *
   * Run-time parameters (year, month, OBS credentials) come from [[Iargs]].
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      // .master("local[*]")  // enable only for local debugging
      .getOrCreate()

    val sc = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    /** *******************  IMPORTANT: adjust per run  *********************/

    val obs = "s3a://"

    val platform_Name = "Alibaba"

    val year = Iargs.YEAR
    val month = Iargs.MONTH

    // Path of the monthly detail data.
    val data_path = obs + s"o2o-sourcedata/obs-source-${year}/${month}/${platform_Name}/${platform_Name}_${year}_${month}"

    // Path of the monthly address dimension data.
    // NOTE(review): the "2020" segments are hard-coded — presumably the
    // address table is only produced for 2020; confirm before reusing for
    // other years.
    val address_path = s"s3a://o2o-dimension-table/address_table/address_source_data_2020/address_platform_newAdd/2020_${month}/${platform_Name.toLowerCase}_address_2020_${month}"

    val data_df: DataFrame = spark.read.json(data_path)

    // FIX: was `var` (never reassigned). Also fixed two correctness bugs in
    // the price computation below:
    //  * division by zero (sell_count_30 == 0) produced Infinity/NaN, and
    //    the -1/-1 fallbacks produced a bogus price of 1.0;
    //  * `.formatted("%.2f").toDouble` is locale-sensitive and throws
    //    NumberFormatException under locales that use ',' as the decimal
    //    separator. Replaced with locale-independent rounding.
    val rdd = data_df.dropDuplicates("good_id").toJSON.rdd.map(line => {
      val nObject = JSON.parseObject(line)

      // Missing fields default to -1 so the record is dropped by the
      // `sellCount > 0` filter below.
      val salesAmount = nObject.getOrDefault("sales_30", "-1").toString.toDouble
      // assumes sell_count_30 is an integral value — non-integral strings
      // (e.g. "12.0") would throw here; TODO confirm against the source feed
      val sellCount = nObject.getOrDefault("sell_count_30", "-1").toString.toInt

      // Average unit price rounded to 2 decimals; -1 marks "not computable"
      // and is filtered out downstream.
      val priceText =
        if (sellCount > 0) math.round(salesAmount / sellCount * 100.0) / 100.0
        else -1.0

      nObject.put("priceText", priceText)
      nObject.put("sellCount", sellCount)
      nObject.put("salesAmount", salesAmount)

      nObject.toString
    })

    // Cleaned result: keep only records with a positive sale count and a
    // computable positive price. (Numeric literals instead of the original
    // string literals '0' — Spark casts to the column type either way.)
    val dataframe = spark.read.json(rdd).filter("sellCount > 0 and priceText > 0")

    // Sanity-check aggregation, printed to the driver log for eyeballing.
    dataframe.select("good_id", "sellCount", "salesAmount").groupBy()
      .agg("good_id" -> "count", "sellCount" -> "sum", "salesAmount" -> "sum").show(false)

    // Address dimension table, deduplicated before the join.
    val address_df: DataFrame = spark.read.json(address_path)
      .selectExpr("shopId", "province", "city", "county as district", "regional_ID")
      .dropDuplicates()

    // Left join keeps detail records even when no address row matches.
    val result_df = dataframe.join(address_df, Seq("shopId"), "left")

    // elasticsearch-spark can index fastjson JSONObjects directly.
    val ress = result_df.toJSON.rdd.map(line => JSON.parseObject(line))

    println(s"    开始入库    ==     node_247   ==  ${year}_${platform_Name}_${month}  ")

    import org.elasticsearch.spark._
    // SECURITY NOTE(review): ES credentials and node address are hard-coded;
    // they should be moved to configuration (e.g. Iargs) / a secrets store.
    ress.saveToEs(
      s"247_${year}_alibaba/alibaba_${year}_${month}",
      Map("es.mapping.id" -> "good_id",
        "es.nodes" -> "192.168.1.29",
        "es.net.http.auth.user" -> "elastic",
        "es.net.http.auth.pass" -> "changeme",
        "es.port" -> "9200",
        "cluster.name" -> "O2OElastic"))

    println("*****************  已入完  *****************")

    sc.stop()
  }
}
