package edu.zju.gis.dbfg.model.scala

import geotrellis.raster._
import geotrellis.raster.resample._
import geotrellis.proj4._
import geotrellis.spark._
import geotrellis.spark.pyramid._
import geotrellis.vector._
import org.apache.spark._
import org.apache.spark.rdd._
import geotrellis.layer.{FloatingLayoutScheme, Metadata, SpatialKey, TileLayerMetadata, ZoomedLayoutScheme}
import geotrellis.spark.store.file.{FileLayerManager, FileLayerWriter}
import geotrellis.spark.store.hadoop.HadoopGeoTiffRDD
import geotrellis.store.LayerId
import geotrellis.store.file.FileAttributeStore
import geotrellis.spark.store.hadoop.{HadoopLayerManager, HadoopLayerWriter}
import geotrellis.store.hadoop.HadoopAttributeStore
import geotrellis.store.index.ZCurveKeyIndexMethod
import org.apache.hadoop.fs.Path

/**
  * Ingests a multiband GeoTIFF into a tiled, pyramided GeoTrellis layer
  * catalog on the local file system (slippy-map / WebMercator layout).
  *
  * @author Hu
  **/
object MIngest {

  /**
    * Reads the GeoTIFF at `input`, cuts it to a tile layout, reprojects it to
    * WebMercator and writes a zoom pyramid named `layerName` under `outputPath`.
    *
    * NOTE(review): because `implicit` opens this parameter list, Scala 2 makes
    * ALL five parameters implicit — not just `sc`. Callers therefore must pass
    * every argument explicitly. The signature is kept as-is to preserve
    * existing call sites; consider currying to
    * `run(input, outputPath, layerName, partition)(implicit sc: SparkContext)`
    * in a follow-up.
    *
    * @param sc         active SparkContext used for all RDD operations
    * @param input      local file-system path of the source GeoTIFF
    * @param outputPath root directory of the file-based layer catalog
    * @param layerName  name of the layer to (re)write at every zoom level
    * @param partition  target partition count for the tiled RDD; ignored when <= 0
    */
  def run(implicit sc: SparkContext,input:String,outputPath:String,layerName:String, partition: Int) = {
    // Read the geotiff in as a single multiband image RDD, using
    // HadoopGeoTiffRDD (imported from geotrellis.spark.store.hadoop).
    val input2 = "file://" + input
    val path = new Path(input2)

    val inputRdd: RDD[(ProjectedExtent, MultibandTile)] =
      HadoopGeoTiffRDD.multiband(path, HadoopGeoTiffRDD.Options.DEFAULT)

    // Use CollectTileLayerMetadata.fromRDD to derive layer metadata
    // (full bounding box, cell type, layout) from the source image.
    val layout = FloatingLayoutScheme(512)

    val (_, rasterMetaData) =
      CollectTileLayerMetadata.fromRDD(inputRdd, layout)

    // Cut the source into tiles indexed by the floating layout scheme, then
    // repartition so spark works with more, smaller partitions (to a point)
    // rather than few large ones. FIX: the `partition` parameter was
    // previously accepted but never used; it is now applied here, guarded so
    // that non-positive values leave the natural partitioning untouched.
    val cut: RDD[(SpatialKey, MultibandTile)] =
      inputRdd.tileToLayout(rasterMetaData.cellType, rasterMetaData.layout, NearestNeighbor)
    val tiled: RDD[(SpatialKey, MultibandTile)] =
      if (partition > 0) cut.repartition(partition) else cut

    // Target a zoomed layout scheme in web mercator (slippy-map tile spec),
    // producing 256 x 256 tiles.
    val layoutScheme = ZoomedLayoutScheme(WebMercator, tileSize = 256)

    // Reproject the tiles to WebMercator; `zoom` is the native zoom level
    // closest to the source resolution.
    val (zoom, reprojected): (Int, RDD[(SpatialKey, MultibandTile)] with Metadata[TileLayerMetadata[SpatialKey]]) =
      MultibandTileLayerRDD(tiled, rasterMetaData)
        .reproject(WebMercator, layoutScheme, NearestNeighbor)

    // Attribute store describing the catalog, plus the writer that persists
    // tiles into the local file-system catalog.
    val attributeStore = FileAttributeStore(outputPath)
    val writer = FileLayerWriter(attributeStore)

    // Pyramid from the native zoom down to level 0, writing each level out.
    Pyramid.upLevels(reprojected, layoutScheme, zoom, NearestNeighbor) { (rdd, z) =>
      val layerId = LayerId(layerName, z)
      // If the layer already exists, delete it before writing to avoid a
      // LayerExistsError from the writer.
      if (attributeStore.layerExists(layerId)) {
        new FileLayerManager(attributeStore).delete(layerId)
      }
      writer.write(layerId, rdd, ZCurveKeyIndexMethod)
    }
  }
}
