package com.sugon.zip

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.TaskContext
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

/**
  * Example submission:
  * ./spark-shell --master yarn --num-executors 11 --jars /root/hudi/udf_util-1.0-SNAPSHOT.jar
  * --conf spark.cassandra.connection.host=182.186.180.21
  */
object ZipHandle {

  /**
    * Entry point. Queries the Cassandra file-metadata table for every ".zip"
    * file directly under the given source directory, then renames each one on
    * SeaweedFS so it lands in a per-partition subdirectory of the destination
    * prefix (des -> des/&lt;partitionId&gt;).
    *
    * @param args args(0) = destination prefix to rewrite (e.g. "temp");
    *             args(1) = source directory used in the metadata query
    *             (e.g. "/buckets/testsource/temp/")
    * @throws IllegalArgumentException if fewer than two arguments are supplied
    */
  def main(args: Array[String]): Unit = {

    if (args.length < 2) {
      throw new IllegalArgumentException("输入参数错误,参数依次为 '替换地址' '来源地址' ")
    }
    // Destination (replacement) prefix, e.g. "temp"
    val des = args(0)
    // Source directory, e.g. /buckets/testsource/temp/
    val source = args(1)

    val spark: SparkSession = SparkSession.builder()
      .appName("file move")
      .getOrCreate()

    // Register the Cassandra catalog so the metadata table is queryable via SQL.
    spark.conf.set("spark.sql.catalog.mycatalog",
      "com.datastax.spark.connector.datasource.CassandraCatalog")
    spark.conf.set("spark.cassandra.connection.host", "182.186.180.21")

    // Full path (directory/name) of every zip file directly under `source`.
    // NOTE(review): `source` is interpolated into the SQL text unescaped —
    // acceptable for an operator-supplied job argument, but not for untrusted input.
    val filePath: DataFrame = spark.sql(
      s"select directory||'/'||name  " +
        s"from mycatalog.seaweedfs.filemeta " +
        s"where directory = '$source' and name like '%.zip' ")

    // Use foreachPartition so the Hadoop Configuration and FileSystem handle are
    // created once per partition instead of once per row: FileSystem.get() is
    // comparatively expensive and the handle is reusable within a task.
    filePath.repartition(9).foreachPartition((it: Iterator[Row]) => {

      val partitionId: Int = TaskContext.getPartitionId()

      val conf: Configuration = new Configuration()
      conf.set("fs.defaultFS", "seaweedfs://slave11:8888")
      conf.set("fs.seaweedfs.impl", "seaweed.hdfs.SeaweedFileSystem")
      val fileSystem: FileSystem = FileSystem.get(conf)

      it.foreach(record => {
        val srcPath: String = record.get(0).toString
        // Route the file into a per-partition subdirectory: des -> des/<partitionId>
        val destFilePath: String = srcPath.replace(des, des + "/" + partitionId.toString)
        fileSystem.rename(new Path(srcPath), new Path(destFilePath))
      })
    })

    spark.close()

  }
}
