package com.o2o.cleaning.month.platform.ebusiness_plat.tengxunketang

import com.alibaba.fastjson.JSON
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.elasticsearch.spark._

/**
 * Ad-hoc local job: converts a month of raw tengxunketang (Tencent Classroom)
 * JSON dumps on Huawei OBS (S3A protocol) into a single ORC file per month.
 *
 * Usage: edit the `orcToJson(...)` call(s) at the bottom of `main` to select
 * the (year, month) to convert, then run locally (`master = local[*]`).
 */
object CheckDataDetail {

  def main(args: Array[String]): Unit = {

    // NOTE(review): ES and S3 credentials below are hard-coded in source.
    // They are preserved here unchanged, but should be moved to a config file
    // or environment variables and rotated before this code is shared further.
    val spark = SparkSession.builder()
      .appName("CheckDataDetail")
      .config("spark.debug.maxToStringFields", "2000")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.sql.caseSensitive", "true")
      .config("es.nodes", "192.168.1.29")
      .config("es.port", "9200")
      .config("cluster.name", "O2OElastic")
      .config("es.net.http.auth.user", "elastic")
      .config("es.net.http.auth.pass", "changeme")
      .master("local[*]")
      .getOrCreate()

    // S3A access to Huawei OBS (cn-north-1 endpoint).
    val sc = spark.sparkContext
    sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
    sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
    sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
    sc.setLogLevel("WARN")

    // --- Historical ad-hoc validation queries, kept for reference ------------
    //    spark.read.orc(s"s3a://o2o-dataproces-group/zyf/2022/4/tengxunketang_old/")
    //      .show(false)
    //      .registerTempTable("t1")
    //          .where("good_id != '128320438' and good_id != '116344482'")
    //    spark.sql("" +
    //      "select count(1),sum(sellCount),sum(salesAmount) from t1 where cast(salesAmount as float) <2000000") //good_id not in ('36851','20773')
    //      .show(false)
    //    spark.sql("" +
    //      "select * from t1 order by cast(salesAmount as float) desc")
    //      .show(false)

    //          spark.read.orc("s3a://o2o-dataproces-group/zyf/tengxunketang/source/2022/04/")registerTempTable("thismonth")
    //          spark.read.orc("s3a://o2o-dataproces-group/zyf/tengxunketang/source/2022/03/").registerTempTable("lastmonth")
    //    spark.sql(     "select sum(aaaa) from (\n" +
    //      "      select case when b.servicesId is null then a.servicesStuCount else a.servicesStuCount-b.servicesStuCount end aaaa " +
    //      "      from (select servicesId,max(cast(servicesStuCount as bigint)) servicesStuCount" +
    //      "            from thismonth where servicesStuCount is not null" +
    //      "            group by servicesId)a " +
    //      "      left join(select servicesId ,max(cast(servicesStuCount as bigint)) servicesStuCount" +
    //      "            from lastmonth where servicesStuCount is not null" +
    //      "            group by servicesId)b " +
    //      "      on a.servicesId = b.servicesId)" +
    //      "      where aaaa > 0"
    //    ).show()
    // ------------------------------------------------------------------------

    // Previously processed months (uncomment to re-run):
    //    orcToJson(spark, sc, 2021, 12)
    //    orcToJson(spark, sc, 2022, 1)
    //    orcToJson(spark, sc, 2022, 2)
    //    orcToJson(spark, sc, 2022, 3)
    //    orcToJson(spark, sc, 2022, 4)
    //    orcToJson(spark, sc, 2022, 5)
    //    orcToJson(spark, sc, 2022, 6)
    //    orcToJson(spark, sc, 2022, 7)
    orcToJson(spark, sc, 2022, 8)
    //    orcToJson(spark, sc, 2022, 9)
  }

  /**
   * Reads one month of raw JSON dumps and rewrites them as a single ORC file.
   *
   * NOTE(review): the name is misleading — this converts JSON -> ORC, not the
   * reverse. Kept as-is so existing call sites continue to compile.
   *
   * @param spark active session used for read/write
   * @param sc    unused; retained only for call-site compatibility
   * @param year  year of the source dump (also used in both paths)
   * @param month month of the source dump, 1-12 (input path unpadded, output
   *              path zero-padded to two digits)
   */
  def orcToJson(spark: SparkSession, sc: SparkContext, year: Int, month: Int): Unit = {
    println(s"$year---$month")
    // BUGFIX: the output path used a literal "0$month" prefix, producing
    // "012" for December (see the 2021/12 run in main's history). f"%02d"
    // yields identical paths for months 1-9 ("08") and correct "12" for >= 10.
    spark.read.json(s"s3a://o2o-sourcedata-$year/obs-source-$year/$month/tengxunketang/")
      .repartition(1) // single output file per month
      .write.orc(f"s3a://o2o-tempdata/zyf/tengxunketang/source/$year/$month%02d/")
  }
}
