package com.o2o.cleaning.month.platform.ebusiness_plat.wangyiyanxuan

import com.alibaba.fastjson.JSON
import com.o2o.utils.Iargs
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * @author o2o-rd-0008
  * @since  2019/4/18
  *
  * Sales statistics for Wangyi Yanxuan (网易严选).
  *
  *   1. Sales: cumulative daily sales volume
  *   2. Categories: swbfirstId, swbfirstName, firstCategoryId, secondCategoryId, thirdCategoryId, fourthCategoryId
  *   3. Address: the concrete address of the NetEase company
  *
  */
object WangyiYanxuanApp_618_To_es {

  /**
    * Entry point: reads the cleaned Wangyi Yanxuan product data (ORC) from
    * OBS and bulk-loads it into Elasticsearch for the "618" sales event.
    *
    * NOTE(review): several values below are hard-coded for a one-off 618 run
    * (the result path, the activity index in [[SAVE_2_ES]]); the commented-out
    * lines show the regular, config-driven variants.
    *
    * @param args unused; all configuration comes from the shared `Iargs` object
    */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName(s"${this.getClass.getSimpleName}")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      //      .config("spark.sql.caseSensitive", "true")
      // TODO(review): hard-coded local master — drop this for cluster submission
      // and let spark-submit supply it instead.
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // OBS (S3A-compatible) credentials come from the shared Iargs config object.
    sc.hadoopConfiguration.set("fs.s3a.access.key", Iargs.OBSACCESS)
    sc.hadoopConfiguration.set("fs.s3a.secret.key", Iargs.OBSSECRET)
    sc.hadoopConfiguration.set("fs.s3a.endpoint", Iargs.OBSENDPOINT)
    sc.setLogLevel("WARN")

    /** *******************  IMPORTANT — adjust before each run   start   ************************************/

    val year = Iargs.YEAR
    val month = Iargs.MONTH

    val platform = "wangyiyanxuan"

    // Path of the cleaned product results on OBS.
    // Regular (config-driven) path:
    //   s"s3a://o2o-dataproces-group/zyf/${year}/${month}/${platform}/"
    // Hard-coded here for the one-off 2021 "620" activity run.
    val resultPath = s"s3a://o2o-dataproces-group/zyf/2021/620/wangyiyanxuan/"

    /** Load the cleaned ORC output and index it into Elasticsearch. ****/
    SAVE_2_ES(spark, spark.read.orc(resultPath), platform, year, month)

    sc.stop()

  }

  /**
    * Writes a DataFrame into Elasticsearch, one document per row.
    *
    * Each row is serialised through Spark's JSON encoder and re-parsed into a
    * fastjson object so the ES connector can map the `good_id` field to the
    * document id (`es.mapping.id`).
    *
    * @param spark         active session (not used directly; kept for call-site compatibility)
    * @param dataFrame     cleaned product data to index
    * @param platform_Name platform key used to build the index name, e.g. "wangyiyanxuan"
    * @param year          statistics year as a string, e.g. "2021"
    * @param month         statistics month as a string, e.g. "6"
    */
  def SAVE_2_ES(spark: SparkSession, dataFrame: DataFrame, platform_Name: String, year: String, month: String): Unit = {
    // One fastjson JSONObject per row; parsing is deferred until saveToEs runs.
    val ress = dataFrame.toJSON.rdd.map(line => {
      val lines = JSON.parseObject(line)
      lines
    })
    println("**********         开始入库" + "\n" + "**********         node_157" + "\n" +
      s"          ${year}_${platform_Name}/${platform_Name}_${year}_${month}")
    import org.elasticsearch.spark._

    // Regular (non-activity) index:
    //   s"${year}_${platform_Name}/${platform_Name}_${year}_${month}"

    // 618-activity index, e.g. beibeiapp_2020_activity/beibeiapp_2020_618
    val index = s"${platform_Name}_${year}_activity/${platform_Name}_${year}_${month}18"

    // SECURITY(review): ES host and credentials are hard-coded in source;
    // move them into Iargs / external configuration.
    ress.saveToEs(index,
      Map("es.mapping.id" -> "good_id",
        "es.nodes" -> "192.168.1.29",
        "es.net.http.auth.user" -> "elastic",
        "es.net.http.auth.pass" -> "changeme",
        "es.port" -> "9200",
        "cluster.name" -> "O2OElastic"))

    println("\n*****************  已入完  *****************")
    //1306026_1466071 3988145_300353599  3993697_300233649 3999050_300273355
  }


}
