package com.wangwg.sparkTest.PostGIS

import java.util.UUID

import org.apache.hadoop.conf.Configuration
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.geotools.data.shapefile.{ShapefileDataStore, ShapefileDataStoreFactory}
import org.geotools.data.{FeatureWriter, Query, Transaction}
import org.locationtech.geomesa.spark.{GeoMesaSpark, GeoMesaSparkKryoRegistrator, SpatialRDD}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}

import scala.collection.JavaConversions._
import scala.reflect.io.Directory


/**
 * Reads features from a PostGIS database through GeoMesa/Spark, then writes
 * each RDD partition to its own local shapefile, targeting roughly 50
 * features per file. Originally a data-reading smoke test.
 *
 * @author wangwg
 */
object GeomesaPostGISToShpfile {

  /** Target number of features written into each shapefile (drives repartitioning). */
  private val FeaturesPerShapefile = 50L

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("GeomesaPostGISToShpfile")
    // GeoMesa requires Kryo serialization with its own registrator so that
    // SimpleFeature instances can be shipped between executors.
    sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    sparkConf.set("spark.kryo.registrator", classOf[GeoMesaSparkKryoRegistrator].getName)
    val sparkContext = SparkContext.getOrCreate(sparkConf)

    // PostGIS connection parameters.
    // NOTE(review): credentials are hard-coded — consider moving host/user/passwd
    // to configuration or environment variables before this leaves test use.
    val params = Map(
      "geotools" -> "true",
      "dbtype" -> "postgis",
      "host" -> "139.9.130.25",
      "user" -> "lhpostgis",
      "passwd" -> "lhpostgis",
      "port" -> "5432",
      "database" -> "lhpostgis")

    // Query every feature of the "SJ_KCZY" type.
    val query = new Query("SJ_KCZY")
    val rdd = GeoMesaSpark(params).rdd(new Configuration(), sparkContext, params, query)
    println(rdd.schema) // print the feature type (schema) of the loaded data

    /*    var saveparams: Map[String, String] = Map()
        //parameters += ("fs.path" -> "hdfs://localhost:9000/fs-root/")
        var File = new File("D:\\work\\bigdata\\TestShp1\\1775a16a-e47c-4c70-bbcc-c75773862f7f_0.shp")
        saveparams += ("url" -> File.toURI.toURL.toString)
        saveparams += ("geotools" -> "true")
        GeoMesaSpark(saveparams).save(rdd, saveparams, "1775a16a-e47c-4c70-bbcc-c75773862f7f_0")*/

    printFeature(sparkContext, rdd)
    println("end")
  }

  /**
   * Repartitions the RDD so each partition holds about [[FeaturesPerShapefile]]
   * features, then writes every partition to its own shapefile under a local
   * directory named after the current Spark application id.
   *
   * @param sparkContext active Spark context (used for broadcasts and its conf)
   * @param rdd          spatial RDD of SimpleFeatures read from PostGIS
   */
  def printFeature(sparkContext: SparkContext, rdd: SpatialRDD): Unit = {
    println("默认分区数:".concat(rdd.getNumPartitions.toString))

    // Ceiling-divide the feature count by the batch size, clamped to >= 1
    // (equivalent to the original var-based remainder bookkeeping).
    val shpCount = rdd.count()
    val partitionCount =
      math.max(1L, (shpCount + FeaturesPerShapefile - 1) / FeaturesPerShapefile).toInt

    val reRdd = rdd.repartition(partitionCount) // spread features across the new partitions
    println("现有分区数:".concat(reRdd.getNumPartitions.toString))

    // Output directory for the current application run.
    // NOTE(review): hard-coded Windows base path — TODO make configurable.
    val resultPath = "D:\\work\\bigdata\\".concat(sparkContext.getConf.getAppId)
    val directory = Directory(resultPath)
    directory.createDirectory(true, false)

    // Broadcast the values every task needs instead of capturing driver state.
    val pathBroadcast = sparkContext.broadcast(resultPath)
    val nameBroadcast = sparkContext.broadcast(UUID.randomUUID.toString)

    reRdd.foreachPartition { iter =>
      import java.io.{File, Serializable}
      import java.util
      // Do NOT call iter.size (or anything that consumes the iterator) before
      // the foreach below — an Iterator can only be traversed once.
      val path = pathBroadcast.value
        .concat("\\").concat(nameBroadcast.value)
        .concat("_").concat(TaskContext.getPartitionId.toString)
        .concat(".shp")
      val file = new File(path)
      val featureParams: util.Map[String, Serializable] = new util.HashMap[String, Serializable]
      featureParams.put(ShapefileDataStoreFactory.URLP.key, file.toURI.toURL)

      var featureWriter: FeatureWriter[SimpleFeatureType, SimpleFeature] = null
      var dataStore: ShapefileDataStore = null
      try {
        iter.foreach { item =>
          try {
            // Lazily create the data store on the first feature so an empty
            // partition produces no shapefile at all; the schema comes from
            // the first feature's type.
            if (featureWriter == null) {
              dataStore = new ShapefileDataStoreFactory()
                .createNewDataStore(featureParams).asInstanceOf[ShapefileDataStore]
              dataStore.createSchema(item.getFeatureType)
              featureWriter = dataStore.getFeatureWriterAppend(Transaction.AUTO_COMMIT)
            }
            val simpleFeature = featureWriter.next()
            simpleFeature.setAttributes(item.getAttributes)
            simpleFeature.setDefaultGeometry(item.getDefaultGeometry)
            featureWriter.write()
          } catch {
            // Best-effort per feature: log and keep writing the rest
            // (preserves the original swallow-and-continue behavior).
            case e: Exception =>
              e.printStackTrace()
          }
        }
      } finally {
        // BUG FIX: the original called close()/dispose() unconditionally,
        // which threw NullPointerException whenever a partition was empty
        // (writer and store never initialized), and leaked both resources
        // if the loop failed part-way. Guarded cleanup in a finally fixes both.
        if (featureWriter != null) featureWriter.close()
        if (dataStore != null) dataStore.dispose()
      }
    }
  }
}
