package sparkprograms.datapreprocessing

import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.Iterator

/**
  * Created by yang on 17-3-25.
  */
/**
  * Builds line segments from consecutive taxi GPS points.
  *
  * Input lines are tab-separated records — id, timestamp, longitude, latitude —
  * pre-sorted by id upstream (the "SortById" dataset).
  *
  * Created by yang on 17-3-25.
  */
object GetLineSegment {

  // NOTE(review): building the SparkContext at object initialization means it
  // starts as soon as the object is first touched; kept here to preserve the
  // original layout and the `GetLineSegment.sc` / `.conf` fields.
  val conf = new SparkConf().setMaster("local[4]").setAppName("PointToSegment")
  val sc = new SparkContext(conf)

  def main(args: Array[String]): Unit = {
    val lines = sc.textFile("hdfs://localhost:9000/taxidata/shanghai/SortById/part-00000")

    // Parse each tab-separated record into a flat (id, time, lng, lat) tuple —
    // the shape pointToSeg expects (the original produced a nested tuple that
    // could never match pointToSeg's Tuple4 pattern).
    val formatline = lines.map { line =>
      val point = line.split("\t")
      (point(0).toInt, point(1).toInt, point(2).toFloat, point(3).toFloat)
    }

    // Pair consecutive points of the same taxi into segments, per partition.
    // NOTE(review): mapPartitions assumes each taxi's points are contiguous
    // within one partition — verify the upstream sort/partitioning guarantees
    // this, otherwise segments at partition boundaries are silently dropped.
    val segments = formatline.mapPartitions(pointToSeg)

    // TODO(review): the original file never materialized a result; add an
    // action (e.g. segments.saveAsTextFile(...)) when the output path is known.
    segments
  }

  /**
    * Turns an iterator of time-ordered GPS points into line segments.
    *
    * Two consecutive points sharing the same id form the segment
    * (id, endTime, startLng, startLat, endLng, endLat); pairs that cross an
    * id boundary are skipped. Elements that do not match the expected
    * (Int, Int, Float, Float) shape are skipped as well (the original threw
    * a MatchError for them).
    *
    * Fixes vs. the original:
    *  - the sliding window now advances each step (the original never updated
    *    the previous point, so every segment started from the FIRST point);
    *  - empty or single-element input yields an empty iterator instead of
    *    throwing on `iter.next`;
    *  - output preserves input (time) order — the original's no-arg
    *    `res.sort()` and `res.elements` do not exist in supported Scala.
    *
    * @param iter points of one partition, sorted by id then time
    * @return segments in input order
    */
  def pointToSeg[A](iter: Iterator[A]): Iterator[(Int, Int, Float, Float, Float, Float)] =
    iter.sliding(2).collect {
      case Seq((id1: Int, _: Int, sLng: Float, sLat: Float),
               (id2: Int, endTime: Int, eLng: Float, eLat: Float)) if id1 == id2 =>
        (id1, endTime, sLng, sLat, eLng, eLat)
    }

}
