package com.wangwg.sparkTest.PostGIS

import org.apache.hadoop.conf.Configuration
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.geotools.data.Query
import org.locationtech.geomesa.spark.{GeoMesaSpark, GeoMesaSparkKryoRegistrator, SpatialRDD}

import scala.collection.JavaConversions._


/**
 * Opens a PostGIS database via GeoMesa's GeoTools SpatialRDDProvider and
 * iterates over the layer, printing every feature.
 *
 * Data-reading test example. NOTE: despite the "ToHbase" name, this program
 * only reads from PostGIS; no HBase write is performed.
 *
 * @author wangwg
 */
object GeomesaPostGISToHbase {

  /**
   * Entry point: connects to a PostGIS database through GeoMesa's GeoTools
   * SpatialRDDProvider, loads the "SJ_KCZY" feature type as a SpatialRDD and
   * prints its schema and every feature.
   *
   * NOTE(review): despite the object name, nothing is written to HBase here —
   * this is a read-only smoke test against PostGIS.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("GeomesaPostGISToHbase")
      // Kryo serialization is required so GeoMesa SimpleFeatures can be
      // shuffled between executors; the registrator wires in the SFT codecs.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.kryo.registrator", classOf[GeoMesaSparkKryoRegistrator].getName)
    val sparkContext = SparkContext.getOrCreate(sparkConf)

    // GeoTools/PostGIS connection parameters ("geotools" -> "true" selects the
    // GeoTools SpatialRDDProvider).
    // SECURITY(review): host and credentials are hard-coded; move them to
    // program arguments, a properties file, or environment variables.
    val params = Map(
      "geotools" -> "true",
      "dbtype" -> "postgis",
      "host" -> "139.9.130.25",
      "user" -> "lhpostgis",
      "passwd" -> "lhpostgis",
      "port" -> "5432",
      "database" -> "lhpostgis")

    // Unfiltered query: fetch all features of the "SJ_KCZY" type.
    val query = new Query("SJ_KCZY")
    val rdd = GeoMesaSpark(params).rdd(new Configuration(), sparkContext, params, query)
    println(rdd.schema) // print the feature type (storage schema)
    printFeature(sparkContext, rdd)
    println("end")
    sparkContext.stop() // release Spark resources before the JVM exits
  }

  /**
   * Repartitions the RDD and prints each feature prefixed with the id of the
   * partition it is processed on.
   *
   * @param sparkContext active Spark context (kept for interface
   *                     compatibility; not used directly in this method)
   * @param rdd          the GeoMesa spatial RDD whose features are printed
   */
  def printFeature(sparkContext: SparkContext, rdd: SpatialRDD): Unit = {
    println(s"默认分区数:${rdd.getNumPartitions}")
    // Repartition into 3 partitions (original comment claimed 5 — fixed).
    val repartitioned = rdd.repartition(3)
    println(s"现有分区数:${repartitioned.getNumPartitions}")
    repartitioned.foreachPartition { features =>
      features.foreach { feature =>
        // TaskContext.getPartitionId identifies the executor-side partition.
        println(s"当前分区id:${TaskContext.getPartitionId}:$feature")
      }
    }
  }
}
