package com.wangwg.sparkTest.Analysis.Shp

import java.io.File
import java.util.UUID

import org.apache.hadoop.conf.Configuration
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkContext, TaskContext}
import org.geotools.data.shapefile.{ShapefileDataStore, ShapefileDataStoreFactory}
import org.geotools.data.{FeatureWriter, Query, Transaction}
import org.locationtech.geomesa.spark.jts._
import org.locationtech.geomesa.spark.{GeoMesaSpark, GeoMesaSparkKryoRegistrator, SpatialRDD}
import org.locationtech.jts.geom.Geometry
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}

import scala.collection.JavaConversions._
import scala.collection.mutable.ListBuffer
import scala.reflect.io.Directory
import scala.util.control.NonFatal

/**
 * GeoMesa shapefile buffer analysis: buffers every feature of an input
 * shapefile and writes the buffered features out as new shapefiles.
 *
 * @author wangwg
 */
object GeomesaBufferShp {
  /**
   * Entry point: opens a shapefile as a GeoMesa SpatialRDD, buffers every
   * feature by 35% of its envelope width, and writes the buffered features
   * back out as shapefiles (one file per non-empty output partition).
   */
  def main(args: Array[String]): Unit = {
    val startTime = System.currentTimeMillis()
    val sparkSession = SparkSession.builder()
      .appName("GeomesaBufferShp") // fixed copy-pasted name from the intersect job
      .config("spark.driver.maxResultSize", "24g") // max size of results collected to the driver; jobs exceeding it are aborted
      .config("spark.driver.memory", "24g") // driver heap size
      .config("spark.executor.memory", "24g") // heap size per executor
      .config("spark.executor.cores", "1")
      .config("spark.default.parallelism", "200") // default task count (normally equals the CPU core count)
      .config("spark.sql.crossJoin.enabled", "true")
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .config("spark.kryo.registrator", classOf[GeoMesaSparkKryoRegistrator].getName)
      .master("local[*]")
      .getOrCreate()
      .withJTS
    //val sourceRdd = openShpRdd(sparkSession.sparkContext, "D:\\work\\bigdata\\data\\landuse", "gis_osm_landuse_a_free_1") // ~1.59M features
    val bufferRDD = openShpRdd(sparkSession.sparkContext, "D:\\work\\bigdata\\data\\buildings", "gis_osm_buildings_a_free_1") // ~455k features

    // Split into small RDDs so each piece stays memory-safe when distributed;
    // this matters for feature sets in the hundreds of millions.
    val splitResultRDD = splitRdd(bufferRDD, 200000)

    // Per-application output directory, keyed by the Spark application id.
    val resultPath = "D:\\work\\bigdata\\buffer\\".concat(sparkSession.sparkContext.getConf.getAppId)
    val directory = Directory(resultPath)
    directory.createDirectory(true, false)

    for (itemRDD <- splitResultRDD) {
      val resultRdd = itemRDD.mapPartitions(partition => {
        // `map` replaces the original single-element ListBuffer + flatMap.
        partition.map(item => {
          val originGeometry: Geometry = item.getDefaultGeometry.asInstanceOf[Geometry]
          // Buffer distance is proportional to the feature's envelope width.
          val distance = originGeometry.getEnvelopeInternal.getWidth * 0.35
          item.setDefaultGeometry(originGeometry.buffer(distance))
          item
        })
      })
      val reResultRdd = repartRdd(resultRdd, 100000)
      reResultRdd.foreachPartition(partition => {
        import java.io.{File, Serializable}
        import java.util
        // One shapefile per partition, named <uuid>_<partitionId>.shp to avoid collisions.
        val path = resultPath.concat("\\").concat(UUID.randomUUID.toString).concat("_").concat(TaskContext.getPartitionId.toString).concat(".shp")
        val file = new File(path)
        val featureParams: util.Map[String, Serializable] = new util.HashMap[String, Serializable]
        featureParams.put(ShapefileDataStoreFactory.URLP.key, file.toURI.toURL)
        var featureWriter: FeatureWriter[SimpleFeatureType, SimpleFeature] = null
        var featureDataStore: ShapefileDataStore = null
        try {
          partition.foreach(item => {
            try {
              // Create the store/writer lazily so empty partitions produce no file;
              // the schema comes from the first feature seen.
              if (featureWriter == null) {
                featureDataStore = new ShapefileDataStoreFactory().createNewDataStore(featureParams).asInstanceOf[ShapefileDataStore]
                featureDataStore.createSchema(item.getFeatureType)
                featureWriter = featureDataStore.getFeatureWriterAppend(Transaction.AUTO_COMMIT)
              }
              val simpleFeature = featureWriter.next()
              simpleFeature.setAttributes(item.getAttributes)
              simpleFeature.setDefaultGeometry(item.getDefaultGeometry)
              featureWriter.write()
            } catch {
              // Best effort: log and skip the failing feature, keep writing the rest.
              // NonFatal lets OOM/interrupts propagate instead of being swallowed.
              case NonFatal(e) => e.printStackTrace()
            }
          })
        } finally {
          // Release the writer and store even if iteration itself fails,
          // so the shapefile handles are never leaked.
          if (featureWriter != null) {
            featureWriter.close()
            featureDataStore.dispose()
          }
        }
      })
    }
    val endTime = System.currentTimeMillis
    println((endTime - startTime) / 1000) // elapsed seconds
  }


  /**
   * Splits an RDD into several RDDs of roughly `count` elements each.
   *
   * @param rdd   source RDD
   * @param count target number of elements per resulting RDD
   * @return array of randomly split RDDs (always at least one)
   */
  def splitRdd(rdd: RDD[SimpleFeature], count: Int): Array[RDD[SimpleFeature]] = {
    val total = rdd.count()
    // ceil(total / count), never fewer than one piece (also covers an empty RDD).
    val pieces = math.max(1L, (total + count - 1) / count).toInt
    // randomSplit normalizes its weights, so equal weights yield equal-sized
    // pieces; this replaces the original floating-point accumulation loop,
    // which could emit a spurious near-zero extra weight due to FP error.
    rdd.randomSplit(Array.fill(pieces)(1.0 / pieces))
  }

  /**
   * Opens a shapefile as a GeoMesa SpatialRDD.
   *
   * @param sparkContext active Spark context
   * @param path         directory containing the shapefile
   * @param name         type (layer) name to query
   * @return the features of the named layer as a SpatialRDD
   */
  def openShpRdd(sparkContext: SparkContext, path: String, name: String): SpatialRDD = {
    // "geotools" -> "true" routes GeoMesa through the GeoTools data-store provider.
    // (Original used `var File = new File(path)`, shadowing the File class.)
    val params = Map(
      "url" -> new File(path).toURI.toURL.toString,
      "geotools" -> "true"
    )
    val query = new Query(name)
    GeoMesaSpark(params).rdd(new Configuration(), sparkContext, params, query)
  }

  /**
   * Repartitions an RDD so each partition holds roughly `count` elements.
   *
   * @param rdd   source RDD
   * @param count target number of elements per partition
   * @return the repartitioned RDD
   */
  def repartRdd(rdd: RDD[SimpleFeature], count: Int): RDD[SimpleFeature] = {
    println("默认分区数:".concat(rdd.getNumPartitions.toString))
    val total = rdd.count
    // ceil(total / count), never fewer than one partition (covers an empty RDD).
    val targetPartitions = math.max(1L, (total + count - 1) / count).toInt
    val reRdd = rdd.repartition(targetPartitions)
    println("现有分区数:".concat(reRdd.getNumPartitions.toString))
    reRdd
  }
}
