package cn.lecosa.spark
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.SparkSession
import cn.lecosa.spark.mysql.JdbcUtil

object topN {

  /** One input record: a CSV row of exactly four integer columns. */
  case class Person(one: Int, two: Int, three: Int, four: Int)

  /**
   * Reads CSV rows from HDFS, keeps well-formed 4-column numeric rows,
   * registers them as a temp view and prints those whose second column
   * exceeds 1000.
   *
   * Single SparkSession entry point: the original code registered the temp
   * view through a deprecated `new SQLContext(sc)` but ran the query through
   * a session obtained from `JdbcUtil.SparkSessionSingleton` — visibility of
   * the view then depended on both happening to wrap the same underlying
   * session. Using one session for both removes that fragility.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[*]")
      .appName(this.getClass.getName)
      .getOrCreate()
    val sc = spark.sparkContext

    try {
      val lines = sc.textFile("hdfs://park01:9000/home/topn", 3)

      // Keep only rows with exactly four comma-separated, purely numeric
      // fields. The original filter checked the field count only, so a row
      // with a non-numeric field crashed the job with NumberFormatException
      // at the .toInt calls below.
      val validFields = lines
        .map(_.trim)
        .filter(_.nonEmpty)
        .map(_.split(","))
        .filter(fields => fields.length == 4 && fields.forall(_.trim.matches("-?\\d+")))

      val people = validFields.map { f =>
        Person(f(0).trim.toInt, f(1).trim.toInt, f(2).trim.toInt, f(3).trim.toInt)
      }

      import spark.implicits._
      val df = people.toDF()
      df.createOrReplaceTempView("people")

      // Query string kept verbatim from the original job.
      val teenagersDF = spark.sql("SELECT * FROM people WHERE  two >1000 ")
      teenagersDF.show()

      people.foreach(p => println(p))
    } finally {
      // Guarantee the session is torn down even if the job fails mid-way;
      // the original unguarded sc.stop() leaked the context on error.
      spark.stop()
    }
  }
}
