package com.o2o.cleaning.month.platform.ebusiness_plat.nlp

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.{SparkSession, functions}

import scala.collection.mutable.ListBuffer

object CheckDataDetail {

  /**
   * Local debug utility: reads crawled JD e-commerce product JSON from disk,
   * flattens each record's nested "comments" JSON array into one JSON record
   * per comment (tagged with the parent product's good_id), registers the
   * result as a temp view and prints it for inspection.
   *
   * @param args unused
   */
  def main(args: Array[String]): Unit = {

    val spark = SparkSession.builder()
      .appName("CheckDataDetail")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .config("es.net.http.auth.user", "elastic")
      // SECURITY: credentials hardcoded in source — move to config/secret store and rotate.
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]")
      .getOrCreate()

    val sc = spark.sparkContext
    // SECURITY: OBS/S3 access keys hardcoded in source — externalize and rotate.
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("WARN")

    // Flatten: each source line is one product record holding a "comments"
    // JSON array; emit one JSON string per comment, enriched with the parent
    // record's good_id so comments remain joinable to their product.
    val flattenedComments = spark.read.json("D:\\o2o4zyfBywork\\需求\\NLP\\source\\jd").toJSON.rdd.flatMap { line =>
      // Embedded newlines would break line-oriented JSON handling downstream.
      val record = JSON.parseObject(line.replaceAll("\n", ""))
      val comments = record.getJSONArray("comments")
      val goodId = record.getString("good_id")
      val perComment = new ListBuffer[String]()
      // BUGFIX: original guard was `comments.size() > 1`, which silently
      // dropped every product that had exactly one comment. `0 until size`
      // naturally handles the empty array, so no guard is needed at all.
      for (i <- 0 until comments.size()) {
        val comment = comments.getJSONObject(i)
        comment.put("good_id", goodId)
        perComment.append(comment.toJSONString)
      }
      perComment
    }

    // registerTempTable is deprecated since Spark 2.0; createOrReplaceTempView
    // is the supported equivalent (session-scoped temporary view).
    spark.read.json(flattenedComments).createOrReplaceTempView("t1")
    spark.sql(
      """
        |select * from t1
        |""".stripMargin).show(400, false)
  }

}
