package a_o2odata_deal

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}


/**
  * @ Author: o2o-rd-0008
  * @ Date:   2019/11/27 14:56
  * @ Param:  ${PARAM}
  * @ Description: Entry point for the Mongo -> Spark data-processing job
  *                (restores/reads data from MongoDB; earlier one-off ES/S3
  *                stages are kept below, commented out, for reference).
  */
object mongo_main {

  /**
    * Driver entry point: configures Spark/ES/S3A, then runs the Mongo extraction stage.
    *
    * The large commented-out sections below are previously-run one-off jobs (nick tagging,
    * ES backups, product/anchor loads) retained as a historical record.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // NOTE: `getSimpleName` on a Scala `object` includes a trailing '$'
    // (e.g. "mongo_main$"); strip it so the Spark UI shows a clean app name.
    conf.setAppName(this.getClass.getSimpleName.stripSuffix("$"))
    conf.set("spark.debug.maxToStringFields", "500")
    //conf.setMaster("local[*]")
    conf.set("es.nodes", "192.168.1.157")
    conf.set("es.port", "9200")
    conf.set("cluster.name", "O2OElastic")
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    conf.set("spark.sql.caseSensitive", "true")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)
    sc.setLogLevel("WARN")

    // SECURITY: the S3A/OBS credentials were hard-coded in source (and therefore exposed in
    // version control). They are now read from the environment first; the hard-coded values
    // remain only as a backward-compatible fallback and MUST be rotated and removed.
    sc.hadoopConfiguration.set("fs.s3a.access.key",
      sys.env.getOrElse("S3A_ACCESS_KEY", "GAO7EO9FWKPJ8WFCQDME"))
    sc.hadoopConfiguration.set("fs.s3a.secret.key",
      sys.env.getOrElse("S3A_SECRET_KEY", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL"))
    sc.hadoopConfiguration.set("fs.s3a.endpoint",
      sys.env.getOrElse("S3A_ENDPOINT", "https://obs.cn-north-1.myhuaweicloud.com"))

    try {
      // Fetch data from Mongo — the only active pipeline stage.
      a_aa_amainpackage.a_o2odata_deal.utils.from_mongo.spark_mongo_restart(sqlContext, sc)

      // Tag records with the anchor's nick (historical, disabled)
     /* val anchor = sqlContext.read.json(s"s3a://o2o-dataproces-group/panzonghao/tmall/11/tmall_anchor_id")
      val value = anchor.toJSON.rdd.map(line => {
        val nObject = JSON.parseObject(line)
        val anchorId = nObject.getOrDefault("anchorId", "-1").toString
        val nick = nObject.getOrDefault("nick", "-1").toString
        nObject.put("anchorId", anchorId)
        nObject.put("nick", nick)
        nObject.toJSONString
      })

      sqlContext.read.json(value).registerTempTable("anchor")
      for (months <- 1 to 10) {
        val zhibo = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao/${months}/zhibo_zhubo")
        val value1 = zhibo.toJSON.rdd.map(line => {
          val nObject = JSON.parseObject(line)
          val anchorId = nObject.getOrDefault("anchorId", "-1").toString
          val nick = nObject.getOrDefault("nick", "-1").toString
          nObject.put("anchorId", anchorId)
          nObject.put("nick", nick)
          nObject.toJSONString
        })
        sqlContext.read.json(value1).registerTempTable("zhibo")

        val dataa = sqlContext.sql(
          s"""
             |select
             |t1.*,
             |case when t2.anchorId is null then t1.nick else t2.nick end as nicks
             |from
             |zhibo t1
             |left join
             |anchor t2
             |on t1.anchorId=t2.anchorId
           """.stripMargin).drop("nick").withColumnRenamed("nicks", "nick")
        dataa.repartition(4).write.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao/${months}/zhibo_zhubo_v1")
      }

  */

      // Back up monthly ES indices to S3 (historical, disabled)
      /*for (months <- 1 to 12){
        val value: RDD[String] = sc.esJsonRDD(s"2019_taobao/taobao_2019_${months}").values
        value.repartition(24).saveAsTextFile(s"s3a://o2o-dataproces-group/panzonghao/zhibo_zf/backups/taobao/${months}")

        val value1: RDD[String] = sc.esJsonRDD(s"2019_tmall/tmall_2019_${months}").values
        value1.repartition(12).saveAsTextFile(s"s3a://o2o-dataproces-group/panzonghao/zhibo_zf/backups/tmall/${months}")
      }*/

      /*import org.elasticsearch.spark._
      // Load product data into ES (historical, disabled)
      for (months <- 12 to 12){
        val result = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao/${months}/zhibo_finally/")
        val value = result.toJSON.rdd.map(line => {
          val lines = JSON.parseObject(line)
          val evaluates = lines.getOrDefault("evaluates", "-1").toString
          val Base_Info = lines.getOrDefault("Base_Info", "-1").toString
          var promotion_price = lines.getOrDefault("promotion_price","-1").toString
          var original_cost = lines.getOrDefault("original_cost","-1").toString
          val priceText = lines.get("priceText").toString
          if (promotion_price.contains("?") | promotion_price.contains("？")){
            promotion_price = priceText
          }
          if (original_cost.contains("?") | original_cost.contains("？")){
            original_cost = priceText
          }
          var ev = "-1"
          var str = new JSONObject
          if (evaluates.equals("-1")) {
            ev = "{\"fuyi\":\"-1\"}"
          } else {
            str = JSON.parseObject(evaluates)
          }
          if (!ev.contains("fuyi")) {
            lines.put("evaluates", str)
          } else {
            val evs = JSON.parseObject(ev)
            lines.put("evaluates", evs)
          }
          lines.put("Base_Info",Base_Info)
          //lines.put("promotion_price",promotion_price)
          lines.put("original_cost",original_cost)
          lines
        })
        // original code
        value.saveToEs(s"2019_taobao/taobao_2019_${months}", Map("es.mapping.id" -> "good_id"))

      }
  */
      // Load anchor data and anchor+product data into ES (historical, disabled)
      /*import org.apache.spark.sql.functions._
      import org.elasticsearch.spark._
      for (months <- 1 to 1){
        var result = sqlContext.sparkSession.emptyDataFrame
        var result1 = sqlContext.sparkSession.emptyDataFrame
        if (months==2){
          result = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/tmall/2020/${months}/zhibo_zhubo")
                .drop("timeStamp").withColumn("timeStamp",lit("1582819200"))
        }else{
           result = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/tmall/2020/${months}/zhibo_zhubo")
        }

        result.toJSON.rdd.map(line =>{
          JSON.parseObject(line)
        }).saveToEs(s"2020_tmall_anchor/tmall_anchor_2020_${months}", Map("es.mapping.id" -> "anchorId"))


        if (months==2){
          result1 = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/tmall/2020/${months}/zhibo_live_good")
            .drop("timeStamp").withColumn("timeStamp",lit("1582819200"))
        }else{
          result1 = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/tmall/2020/${months}/zhibo_live_good")
        }

        result1.toJSON.rdd.map(line =>{
          JSON.parseObject(line)
        }).saveToEs(s"2020_tmall_good_live/tmall_good_live_2020_${months}", Map("es.mapping.id" -> "liveAndgood_id"))

      }

      for (months <- 1 to 1){
        var result = sqlContext.sparkSession.emptyDataFrame
        var result1 = sqlContext.sparkSession.emptyDataFrame
        if (months==2){
          result = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao/2020/${months}/zhibo_zhubo")
            .drop("timeStamp").withColumn("timeStamp",lit("1582819200"))
        }else{
          result = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao//2020/${months}/zhibo_zhubo")
        }

        result.toJSON.rdd.map(line =>{
          JSON.parseObject(line)
        }).saveToEs(s"2020_taobao_anchor/taobao_anchor_2020_${months}", Map("es.mapping.id" -> "anchorId"))


        if (months==2){
          result1 = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao/2020/${months}/zhibo_live_good")
            .drop("timeStamp").withColumn("timeStamp",lit("1582819200"))
        }else{
          result1 = sqlContext.read.orc(s"s3a://o2o-dataproces-group/panzonghao/taobao/2020/${months}/zhibo_live_good")
        }

        result1.toJSON.rdd.map(line =>{
          JSON.parseObject(line)
        }).saveToEs(s"2020_taobao_good_live/taobao_good_live_2020_${months}", Map("es.mapping.id" -> "liveAndgood_id"))

      }
  */
    } finally {
      // Release the SparkContext even when the job fails part-way; previously a failure
      // before sc.stop() left the context (and cluster resources) dangling.
      sc.stop()
    }
  }

}
