package com.wangwg.sparkTest.Shp

import java.io.File

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.geotools.data.{FeatureWriter, Query, Transaction}
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.locationtech.geomesa.spark.{GeoMesaSpark, GeoMesaSparkKryoRegistrator, SpatialRDD}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}

import scala.collection.JavaConversions._

/**
 * spark shpfile数据批量写入到hdfs
 */
/**
 * Batch-loads shapefile features via GeoMesa's Spark integration and writes them
 * back out as per-partition shapefiles on HDFS.
 *
 * Flow: read a local shapefile directory into a [[SpatialRDD]], repartition so each
 * partition holds at most [[GeomesaShpToHdfs.FeaturesPerPartition]] features, then have
 * every partition append its features into its own shapefile under `/input/<name>` on HDFS.
 */
object GeomesaShpToHdfs {

  /** Upper bound on features written into a single output partition/shapefile. */
  private val FeaturesPerPartition: Long = 2000L

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("GeomesaShpToHdfs")
    // Kryo + GeoMesa registrator are required to serialize SimpleFeature instances efficiently.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    sparkConf.set("spark.kryo.registrator", classOf[GeoMesaSparkKryoRegistrator].getName)
    val sparkContext = SparkContext.getOrCreate(sparkConf)

    // Directory containing the source shapefile(s). (Renamed from `File`, which
    // shadowed the java.io.File class name.)
    val shpDir = new File("D:\\work\\bigdata")
    val inputParams: Map[String, String] = Map(
      "url" -> shpDir.toURI.toURL.toString,
      "geotools" -> "true" // use the GeoTools shapefile DataStore provider
    )
    val query = new Query("gis_osm_landuse_a_free_1")
    val inputRdd = GeoMesaSpark(inputParams).rdd(new Configuration(), sparkContext, inputParams, query)
    println(inputRdd.schema) // print the feature schema
    writeToHdfs(sparkContext, inputRdd)
  }

  /**
   * Repartitions `inputRdd` and writes each partition to its own shapefile on HDFS.
   *
   * @param sparkContext active Spark context (used to broadcast the output path)
   * @param inputRdd     features to persist
   */
  def writeToHdfs(sparkContext: SparkContext, inputRdd: SpatialRDD): Unit = {
    println(s"默认分区数:${inputRdd.getNumPartitions}")
    // Ceiling division, with a floor of one partition for empty/small inputs.
    val shpCount = inputRdd.count()
    val partitionCount =
      math.max(1L, (shpCount + FeaturesPerPartition - 1) / FeaturesPerPartition).toInt
    val reRdd = inputRdd.repartition(partitionCount) // redistribute features evenly
    println(s"现有分区数:${reRdd.getNumPartitions}")
    val shpName = "landuse"

    val hdfsUrl = "hdfs://lhyg-wangwengang:9000"
    val hdfsConf: Configuration = new Configuration()
    hdfsConf.set("fs.defaultFS", hdfsUrl)
    val hdfs = FileSystem.get(hdfsConf)
    hdfs.mkdirs(new Path("/input/".concat(shpName)))
    // Broadcast the target path so executors don't capture the driver-side Path by closure.
    val pathBroadcast = sparkContext.broadcast(new Path("/input/".concat(shpName)))

    reRdd.foreachPartition { partition =>
      val outputParams: Map[String, String] = Map("fs.path" -> pathBroadcast.value.toUri.toString)
      // One shapefile per partition, suffixed with the partition id.
      val fileName = s"${shpName}_${TaskContext.getPartitionId()}"
      val dataStore = org.geotools.data.DataStoreFinder.getDataStore(outputParams)
      var featureWriter: FeatureWriter[SimpleFeatureType, SimpleFeature] = null
      try {
        partition.foreach { item =>
          try {
            // Lazily create the schema/writer from the first feature's type: an empty
            // partition must not create an empty shapefile.
            if (featureWriter == null) {
              val typeBuilder = new SimpleFeatureTypeBuilder
              typeBuilder.init(item.getFeatureType)
              typeBuilder.setName(fileName)
              dataStore.createSchema(typeBuilder.buildFeatureType)
              featureWriter = dataStore.getFeatureWriterAppend(fileName, Transaction.AUTO_COMMIT)
            }
            val simpleFeature = featureWriter.next()
            simpleFeature.setAttributes(item.getAttributes)
            simpleFeature.setDefaultGeometry(item.getDefaultGeometry)
            featureWriter.write()
          } catch {
            // Best-effort: log and skip a bad feature rather than failing the whole
            // partition. NonFatal lets OOM/interrupts propagate.
            case scala.util.control.NonFatal(e) => e.printStackTrace()
          }
        }
      } finally {
        // Previously these were skipped if iteration threw outside the per-item
        // catch, leaking the writer and the DataStore.
        if (featureWriter != null) {
          featureWriter.close()
        }
        dataStore.dispose()
      }
    }
  }
}
