package cn.darksoul3.spark.cases

import org.apache.spark.rdd.RDD
import org.apache.spark.{Partitioner, SparkConf, SparkContext}

import java.util.concurrent.TimeUnit
import scala.collection.mutable


/**
 * tconst	titleType	primaryTitle	originalTitle	isAdult	startYear	endYear	runtimeMinutes	genres
 */
object CaseOfTopN {

  def main(args: Array[String]): Unit = {
    import scala.util.Try

    val conf = new SparkConf()
    conf.setAppName("group by key").setMaster("local[*]")

    val sc = new SparkContext(conf)

    // NOTE(review): hard-coded local path — consider reading it from args(0).
    val lines = sc.textFile("C:\\Users\\cary2\\Desktop\\title.basics.tsv\\data.tsv")

    // Count occurrences of each (titleType, runtimeMinutes) pair.
    // Fix 1: the file is tab-separated; splitting on "\\s+" broke every title
    //        containing spaces (primaryTitle/originalTitle are free text) and
    //        made column positions unstable.
    // Fix 2: the original assigned the FIELD COUNT minus 2 to runtimeMinutes
    //        instead of reading the second-to-last column (runtimeMinutes per
    //        the schema above); parse that column, mapping non-numeric values
    //        ("\N" placeholders and the header row) to -1.
    val reduced: RDD[((String, Int), Int)] = lines.map(line => {
      val fields = line.split("\t")
      val titleType = fields(1)
      // Try also absorbs short/malformed rows (index out of bounds).
      val runtimeMinutes = Try(fields(fields.length - 2).toInt).getOrElse(-1)
      ((titleType, runtimeMinutes), 1)
    }).reduceByKey(_ + _)

    // `reduced` feeds two downstream computations (the collect() action below
    // and the map to `mapped`); cache it so the scan/shuffle runs once.
    // The original persisted the single-use `mapped` instead, which helped nothing.
    reduced.persist()

    val titleTypes = reduced.map(_._1._1).distinct().collect()
    val mapped = reduced.map(t => ((t._1._1, t._1._2, t._2), null))

    // Sort descending by runtimeMinutes, then descending by count.
    // Replaces the `return`-laden comparator whose `x - y` subtraction could
    // overflow; `Ordering.by(...).reverse` is equivalent for all Int values.
    implicit val ordering: Ordering[(String, Int, Int)] =
      Ordering.by[(String, Int, Int), (Int, Int)](t => (t._2, t._3)).reverse

    // One partition per titleType, sorted within each partition by the
    // implicit ordering above; keep only the key triple for output.
    val tuples = mapped
      .repartitionAndSortWithinPartitions(new TriplePartitioner(titleTypes))
      .map(_._1)

    tuples.saveAsTextFile("imdb-out")
    sc.stop()
  }
}