import org.apache.commons.io.IOUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Dataset, Encoders, SparkSession, functions}
import org.apache.spark.{Partition, SparkConf, SparkContext, TaskContext}
import seaweedfs.client.FilerProto.Entry
import seaweedfs.client.SeaweedRead.ChunkView
import seaweedfs.client.{FilerClient, FilerProto, SeaweedInputStream}

import java.net.{HttpURLConnection, URL}
import java.util
import java.util.{Arrays, List}
import scala.collection.JavaConverters._
import scala.collection.convert.ImplicitConversions.`collection AsScalaIterable`


/**
 * Example Spark application exercising SeaweedFS as a Hadoop-compatible
 * filesystem: writes a small JSON-derived DataFrame to a SeaweedFS bucket
 * in Avro format, reads it back, and runs a simple sorted projection.
 *
 * Requires a SeaweedFS filer listening on localhost:8888.
 */
object Hello {

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf()
      .setAppName("SparkSeaweedExample")
      .setMaster("local[1]")

      // Route all filesystem access through the SeaweedFS Hadoop adapter.
      .set("spark.hadoop.fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem")
      .set("spark.hadoop.fs.defaultFS", "seaweedfs://localhost:8888")
      .set("spark.history.fs.logDirectory", "seaweedfs://localhost:8888/log-history/")

      .set("spark.eventLog.dir", "seaweedfs://localhost:8888/")
      .set("spark.eventLog.enabled", "true")

      // Iceberg catalog backed by the SeaweedFS warehouse directory.
      .set("spark.sql.catalog.spark_catalog", "org.apache.iceberg.spark.SparkSessionCatalog")
      .set("spark.sql.catalog.spark_catalog.type", "hadoop")
      .set("spark.sql.catalog.spark_catalog.catalog-impl", "org.apache.iceberg.hadoop.HadoopCatalog")
      .set("spark.sql.catalog.spark_catalog.warehouse", "seaweedfs://localhost:8888/warehouse/")

      .set("spark.sql.hive.convertMetastoreOrc", "true")
      .set("spark.sql.statistics.fallBackToHdfs", "true")
      .set("spark.sql.orc.filterPushdown", "true")
      .set("spark.sql.orc.impl", "native")

      // History-server housekeeping.
      .set("spark.history.ui.port", "18081")
      .set("spark.history.fs.cleaner.interval", "7d")
      .set("spark.history.provider", "org.apache.spark.deploy.history.FsHistoryProvider")
      .set("spark.history.fs.cleaner.maxAge", "90d")
      .set("spark.history.fs.cleaner.enabled", "true")

    val spark = SparkSession.builder.config(sparkConf).getOrCreate()

    try {
      // Build a tiny JSON dataset. Split on "\n" explicitly: the triple-quoted
      // literal always contains LF regardless of platform, so the previously
      // used platform-dependent IOUtils.LINE_SEPARATOR ("\r\n" on Windows)
      // would silently fail to split there.
      val ds = spark.createDataset(spark.sparkContext.parallelize(
        """
          |{'name':'User-1', 'age':1122}
          |{'name':'User-2', 'age':1130}
          |{'name':'User-3', 'age':1119}
          """.stripMargin.split("\n")))(Encoders.STRING)
      val sampleDf = spark.read.json(ds).toDF()
      sampleDf.write.format("avro").mode("append")
        .save("seaweedfs://localhost:8888/buckets/test-bucket2/")

      val df = spark.read
        .format("avro")
        .load("seaweedfs://localhost:8888/buckets/test-bucket2/")
      // Browse http://localhost:8888/buckets/test-bucket/ to inspect the directory contents.

      val start = System.currentTimeMillis()

      df.select(functions.col("age"), functions.col("name"))
        .sort("age")
        .show(10000, truncate = true)

      // Report how long the read + sort + show took (the timing this `start`
      // was originally created for, but never printed).
      println(s"query took ${System.currentTimeMillis() - start} ms")
    } finally {
      // Ensure the session (and its event-log writer) is closed even if the
      // query or write above fails.
      spark.close()
    }
  }
}
